code
stringlengths
86
54.5k
code_codestyle
int64
0
371
style_context
stringlengths
87
49.2k
style_context_codestyle
int64
0
349
label
int64
0
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available a : int = { 'configuration_altclip': [ 'ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP', 'AltCLIPConfig', 'AltCLIPTextConfig', 'AltCLIPVisionConfig', ], 'processing_altclip': ['AltCLIPProcessor'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a : str = [ 'ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST', 'AltCLIPPreTrainedModel', 'AltCLIPModel', 'AltCLIPTextModel', 'AltCLIPVisionModel', ] if TYPE_CHECKING: from .configuration_altclip import ( ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, AltCLIPConfig, AltCLIPTextConfig, AltCLIPVisionConfig, ) from .processing_altclip import AltCLIPProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_altclip import ( ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST, AltCLIPModel, AltCLIPPreTrainedModel, AltCLIPTextModel, AltCLIPVisionModel, ) else: import sys a : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
56
'''simple docstring''' import doctest import glob import importlib import inspect import os import re from contextlib import contextmanager from functools import wraps from unittest.mock import patch import numpy as np import pytest from absl.testing import parameterized import datasets from datasets import load_metric from .utils import for_all_test_methods, local, slow # mark all tests as integration UpperCAmelCase_ : Union[str, Any] = pytest.mark.integration UpperCAmelCase_ : List[Any] = {'comet'} UpperCAmelCase_ : int = importlib.util.find_spec('fairseq') is not None UpperCAmelCase_ : Optional[Any] = {'code_eval'} UpperCAmelCase_ : Optional[int] = os.name == 'nt' UpperCAmelCase_ : Dict = {'bertscore', 'frugalscore', 'perplexity'} UpperCAmelCase_ : Dict = importlib.util.find_spec('transformers') is not None def snake_case_ ( SCREAMING_SNAKE_CASE__ ): """simple docstring""" @wraps(SCREAMING_SNAKE_CASE__ ) def wrapper(self , SCREAMING_SNAKE_CASE__ ): if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ: self.skipTest("""\"test requires Fairseq\"""" ) else: test_case(self , SCREAMING_SNAKE_CASE__ ) return wrapper def snake_case_ ( SCREAMING_SNAKE_CASE__ ): """simple docstring""" @wraps(SCREAMING_SNAKE_CASE__ ) def wrapper(self , SCREAMING_SNAKE_CASE__ ): if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS: self.skipTest("""\"test requires transformers\"""" ) else: test_case(self , SCREAMING_SNAKE_CASE__ ) return wrapper def snake_case_ ( SCREAMING_SNAKE_CASE__ ): """simple docstring""" @wraps(SCREAMING_SNAKE_CASE__ ) def wrapper(self , SCREAMING_SNAKE_CASE__ ): if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS: self.skipTest("""\"test not supported on Windows\"""" ) else: test_case(self , SCREAMING_SNAKE_CASE__ ) return wrapper def snake_case_ ( ): """simple docstring""" _SCREAMING_SNAKE_CASE : Dict = [metric_dir.split(os.sep )[-2] for metric_dir in glob.glob("""./metrics/*/""" )] return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"] # gleu is unfinished @parameterized.named_parameters(get_local_metric_names() ) @for_all_test_methods( _snake_case , _snake_case , _snake_case ) @local class lowercase__ ( parameterized.TestCase ): '''simple docstring''' A_ : Optional[int] = {} A_ : Union[str, Any] = None @pytest.mark.filterwarnings("""ignore:metric_module_factory is deprecated:FutureWarning""" ) @pytest.mark.filterwarnings("""ignore:load_metric is deprecated:FutureWarning""" ) def UpperCAmelCase_ ( self , __snake_case ): _SCREAMING_SNAKE_CASE : str = """[...]""" _SCREAMING_SNAKE_CASE : Any = importlib.import_module( datasets.load.metric_module_factory(os.path.join("""metrics""" , __snake_case ) ).module_path ) _SCREAMING_SNAKE_CASE : Optional[int] = datasets.load.import_main_class(metric_module.__name__ , dataset=__snake_case ) # check parameters _SCREAMING_SNAKE_CASE : Tuple = inspect.signature(metric._compute ).parameters self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values() ) ) # no **kwargs # run doctest with self.patch_intensive_calls(__snake_case , metric_module.__name__ ): with self.use_local_metrics(): try: _SCREAMING_SNAKE_CASE : int = doctest.testmod(__snake_case , verbose=__snake_case , raise_on_error=__snake_case ) except doctest.UnexpectedException as e: raise e.exc_info[1] # raise the exception that doctest caught self.assertEqual(results.failed , 0 ) self.assertGreater(results.attempted , 1 ) @slow def UpperCAmelCase_ ( self , __snake_case ): _SCREAMING_SNAKE_CASE : List[Any] = """[...]""" _SCREAMING_SNAKE_CASE : Optional[Any] = importlib.import_module( datasets.load.metric_module_factory(os.path.join("""metrics""" , __snake_case ) ).module_path ) # run doctest with self.use_local_metrics(): _SCREAMING_SNAKE_CASE : List[str] = doctest.testmod(__snake_case , verbose=__snake_case , raise_on_error=__snake_case ) self.assertEqual(results.failed , 0 ) self.assertGreater(results.attempted , 1 ) @contextmanager def UpperCAmelCase_ ( self , __snake_case , __snake_case ): if metric_name in self.INTENSIVE_CALLS_PATCHER: with self.INTENSIVE_CALLS_PATCHER[metric_name](__snake_case ): yield else: yield @contextmanager def UpperCAmelCase_ ( self ): def load_local_metric(__snake_case , *__snake_case , **__snake_case ): return load_metric(os.path.join("""metrics""" , __snake_case ) , *__snake_case , **__snake_case ) with patch("""datasets.load_metric""" ) as mock_load_metric: _SCREAMING_SNAKE_CASE : Union[str, Any] = load_local_metric yield @classmethod def UpperCAmelCase_ ( cls , __snake_case ): def wrapper(__snake_case ): _SCREAMING_SNAKE_CASE : Any = contextmanager(__snake_case ) _SCREAMING_SNAKE_CASE : int = patcher return patcher return wrapper @LocalMetricTest.register_intensive_calls_patcher("""bleurt""" ) def snake_case_ ( SCREAMING_SNAKE_CASE__ ): """simple docstring""" import tensorflow.compat.va as tf from bleurt.score import Predictor tf.flags.DEFINE_string("""sv""" , """""" , """""" ) # handle pytest cli flags class lowercase__ ( _snake_case ): '''simple docstring''' def UpperCAmelCase_ ( self , __snake_case ): assert len(input_dict["""input_ids"""] ) == 2 return np.array([1.03, 1.04] ) # mock predict_fn which is supposed to do a forward pass with a bleurt model with patch("""bleurt.score._create_predictor""" ) as mock_create_predictor: _SCREAMING_SNAKE_CASE : Any = MockedPredictor() yield @LocalMetricTest.register_intensive_calls_patcher("""bertscore""" ) def snake_case_ ( SCREAMING_SNAKE_CASE__ ): """simple docstring""" import torch def bert_cos_score_idf(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ): return torch.tensor([[1.0, 1.0, 1.0]] * len(SCREAMING_SNAKE_CASE__ ) ) # mock get_model which is supposed to do download a bert model # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model with patch("""bert_score.scorer.get_model""" ), patch( """bert_score.scorer.bert_cos_score_idf""" ) as mock_bert_cos_score_idf: _SCREAMING_SNAKE_CASE : Any = bert_cos_score_idf yield @LocalMetricTest.register_intensive_calls_patcher("""comet""" ) def snake_case_ ( SCREAMING_SNAKE_CASE__ ): """simple docstring""" def load_from_checkpoint(SCREAMING_SNAKE_CASE__ ): class lowercase__ : '''simple docstring''' def UpperCAmelCase_ ( self , __snake_case , *__snake_case , **__snake_case ): assert len(__snake_case ) == 2 _SCREAMING_SNAKE_CASE : Dict = [0.19, 0.92] return scores, sum(__snake_case ) / len(__snake_case ) return Model() # mock load_from_checkpoint which is supposed to do download a bert model # mock load_from_checkpoint which is supposed to do download a bert model with patch("""comet.download_model""" ) as mock_download_model: _SCREAMING_SNAKE_CASE : Any = None with patch("""comet.load_from_checkpoint""" ) as mock_load_from_checkpoint: _SCREAMING_SNAKE_CASE : List[str] = load_from_checkpoint yield def snake_case_ ( ): """simple docstring""" _SCREAMING_SNAKE_CASE : Union[str, Any] = load_metric(os.path.join("""metrics""" , """seqeval""" ) ) _SCREAMING_SNAKE_CASE : List[str] = """ERROR""" _SCREAMING_SNAKE_CASE : Tuple = f"""Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}""" with pytest.raises(SCREAMING_SNAKE_CASE__ , match=re.escape(SCREAMING_SNAKE_CASE__ ) ): metric.compute(predictions=[] , references=[] , scheme=SCREAMING_SNAKE_CASE__ )
200
0
import inspect from typing import Callable, List, Optional, Union import torch from transformers import ( CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, WhisperForConditionalGeneration, WhisperProcessor, ) from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from diffusers.utils import logging _A = logging.get_logger(__name__) # pylint: disable=invalid-name class lowerCamelCase ( snake_case__ ): '''simple docstring''' def __init__(self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ): """simple docstring""" super().__init__() if safety_checker is None: logger.warning( F"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure""" """ that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered""" """ results in services or applications open to the public. Both the diffusers team and Hugging Face""" """ strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling""" """ it only for use-cases that involve analyzing network behavior or auditing its results. For more""" """ information, please have a look at https://github.com/huggingface/diffusers/pull/254 .""" ) self.register_modules( speech_model=UpperCAmelCase_ , speech_processor=UpperCAmelCase_ , vae=UpperCAmelCase_ , text_encoder=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , unet=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , feature_extractor=UpperCAmelCase_ , ) def _a (self , _lowerCamelCase = "auto" ): """simple docstring""" if slice_size == "auto": UpperCAmelCase__ : Optional[int] = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(UpperCAmelCase_ ) def _a (self ): """simple docstring""" self.enable_attention_slicing(UpperCAmelCase_ ) @torch.no_grad() def __call__(self , _lowerCamelCase , _lowerCamelCase=16000 , _lowerCamelCase = 512 , _lowerCamelCase = 512 , _lowerCamelCase = 50 , _lowerCamelCase = 7.5 , _lowerCamelCase = None , _lowerCamelCase = 1 , _lowerCamelCase = 0.0 , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = "pil" , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = 1 , **_lowerCamelCase , ): """simple docstring""" UpperCAmelCase__ : int = self.speech_processor.feature_extractor( UpperCAmelCase_ , return_tensors="""pt""" , sampling_rate=UpperCAmelCase_ ).input_features.to(self.device ) UpperCAmelCase__ : List[Any] = self.speech_model.generate(UpperCAmelCase_ , max_length=480000 ) UpperCAmelCase__ : Any = self.speech_processor.tokenizer.batch_decode(UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ , normalize=UpperCAmelCase_ )[ 0 ] if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): UpperCAmelCase__ : List[Any] = 1 elif isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): UpperCAmelCase__ : Optional[Any] = len(UpperCAmelCase_ ) else: raise ValueError(F"""`prompt` has to be of type `str` or `list` but is {type(UpperCAmelCase_ )}""" ) if height % 8 != 0 or width % 8 != 0: raise ValueError(F"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" ) if (callback_steps is None) or ( callback_steps is not None and (not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) or callback_steps <= 0) ): raise ValueError( F"""`callback_steps` has to be a positive integer but is {callback_steps} of type""" F""" {type(UpperCAmelCase_ )}.""" ) # get prompt text embeddings UpperCAmelCase__ : Dict = self.tokenizer( UpperCAmelCase_ , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , ) UpperCAmelCase__ : List[str] = text_inputs.input_ids if text_input_ids.shape[-1] > self.tokenizer.model_max_length: UpperCAmelCase__ : str = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] ) logger.warning( """The following part of your input was truncated because CLIP can only handle sequences up to""" F""" {self.tokenizer.model_max_length} tokens: {removed_text}""" ) UpperCAmelCase__ : Any = text_input_ids[:, : self.tokenizer.model_max_length] UpperCAmelCase__ : Tuple = self.text_encoder(text_input_ids.to(self.device ) )[0] # duplicate text embeddings for each generation per prompt, using mps friendly method UpperCAmelCase__ : Dict = text_embeddings.shape UpperCAmelCase__ : Optional[Any] = text_embeddings.repeat(1 , UpperCAmelCase_ , 1 ) UpperCAmelCase__ : int = text_embeddings.view(bs_embed * num_images_per_prompt , UpperCAmelCase_ , -1 ) # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. UpperCAmelCase__ : Optional[Any] = guidance_scale > 1.0 # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: UpperCAmelCase__ : List[str] if negative_prompt is None: UpperCAmelCase__ : Dict = [""] * batch_size elif type(UpperCAmelCase_ ) is not type(UpperCAmelCase_ ): raise TypeError( F"""`negative_prompt` should be the same type to `prompt`, but got {type(UpperCAmelCase_ )} !=""" F""" {type(UpperCAmelCase_ )}.""" ) elif isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): UpperCAmelCase__ : str = [negative_prompt] elif batch_size != len(UpperCAmelCase_ ): raise ValueError( F"""`negative_prompt`: {negative_prompt} has batch size {len(UpperCAmelCase_ )}, but `prompt`:""" F""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches""" """ the batch size of `prompt`.""" ) else: UpperCAmelCase__ : Optional[int] = negative_prompt UpperCAmelCase__ : Dict = text_input_ids.shape[-1] UpperCAmelCase__ : List[str] = self.tokenizer( UpperCAmelCase_ , padding="""max_length""" , max_length=UpperCAmelCase_ , truncation=UpperCAmelCase_ , return_tensors="""pt""" , ) UpperCAmelCase__ : Any = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # duplicate unconditional embeddings for each generation per prompt, using mps friendly method UpperCAmelCase__ : Any = uncond_embeddings.shape[1] UpperCAmelCase__ : List[str] = uncond_embeddings.repeat(1 , UpperCAmelCase_ , 1 ) UpperCAmelCase__ : str = uncond_embeddings.view(batch_size * num_images_per_prompt , UpperCAmelCase_ , -1 ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes UpperCAmelCase__ : Optional[Any] = torch.cat([uncond_embeddings, text_embeddings] ) # get the initial random noise unless the user supplied it # Unlike in other pipelines, latents need to be generated in the target device # for 1-to-1 results reproducibility with the CompVis implementation. # However this currently doesn't work in `mps`. UpperCAmelCase__ : List[str] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8) UpperCAmelCase__ : Union[str, Any] = text_embeddings.dtype if latents is None: if self.device.type == "mps": # randn does not exist on mps UpperCAmelCase__ : Union[str, Any] = torch.randn(UpperCAmelCase_ , generator=UpperCAmelCase_ , device="""cpu""" , dtype=UpperCAmelCase_ ).to( self.device ) else: UpperCAmelCase__ : Optional[Any] = torch.randn(UpperCAmelCase_ , generator=UpperCAmelCase_ , device=self.device , dtype=UpperCAmelCase_ ) else: if latents.shape != latents_shape: raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" ) UpperCAmelCase__ : Any = latents.to(self.device ) # set timesteps self.scheduler.set_timesteps(UpperCAmelCase_ ) # Some schedulers like PNDM have timesteps as arrays # It's more optimized to move all timesteps to correct device beforehand UpperCAmelCase__ : Optional[Any] = self.scheduler.timesteps.to(self.device ) # scale the initial noise by the standard deviation required by the scheduler UpperCAmelCase__ : str = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] UpperCAmelCase__ : Tuple = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() ) UpperCAmelCase__ : List[Any] = {} if accepts_eta: UpperCAmelCase__ : str = eta for i, t in enumerate(self.progress_bar(UpperCAmelCase_ ) ): # expand the latents if we are doing classifier free guidance UpperCAmelCase__ : Optional[int] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents UpperCAmelCase__ : Any = self.scheduler.scale_model_input(UpperCAmelCase_ , UpperCAmelCase_ ) # predict the noise residual UpperCAmelCase__ : Optional[int] = self.unet(UpperCAmelCase_ , UpperCAmelCase_ , encoder_hidden_states=UpperCAmelCase_ ).sample # perform guidance if do_classifier_free_guidance: UpperCAmelCase__ : str = noise_pred.chunk(2 ) UpperCAmelCase__ : Any = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 UpperCAmelCase__ : Optional[Any] = self.scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_ ).prev_sample # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) UpperCAmelCase__ : List[str] = 1 / 0.18_215 * latents UpperCAmelCase__ : List[str] = self.vae.decode(UpperCAmelCase_ ).sample UpperCAmelCase__ : Any = (image / 2 + 0.5).clamp(0 , 1 ) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 UpperCAmelCase__ : Dict = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if output_type == "pil": UpperCAmelCase__ : Dict = self.numpy_to_pil(UpperCAmelCase_ ) if not return_dict: return image return StableDiffusionPipelineOutput(images=UpperCAmelCase_ , nsfw_content_detected=UpperCAmelCase_ )
365
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging _A = logging.get_logger(__name__) _A = { """transfo-xl-wt103""": """https://huggingface.co/transfo-xl-wt103/resolve/main/config.json""", } class lowerCamelCase ( lowerCAmelCase__ ): '''simple docstring''' SCREAMING_SNAKE_CASE = 'transfo-xl' SCREAMING_SNAKE_CASE = ['mems'] SCREAMING_SNAKE_CASE = { 'n_token': 'vocab_size', 'hidden_size': 'd_model', 'num_attention_heads': 'n_head', 'num_hidden_layers': 'n_layer', } def __init__(self , _lowerCamelCase=267735 , _lowerCamelCase=[20000, 40000, 200000] , _lowerCamelCase=1024 , _lowerCamelCase=1024 , _lowerCamelCase=16 , _lowerCamelCase=64 , _lowerCamelCase=4096 , _lowerCamelCase=4 , _lowerCamelCase=False , _lowerCamelCase=18 , _lowerCamelCase=1600 , _lowerCamelCase=1000 , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=0 , _lowerCamelCase=-1 , _lowerCamelCase=True , _lowerCamelCase=0.1 , _lowerCamelCase=0.0 , _lowerCamelCase=True , _lowerCamelCase="normal" , _lowerCamelCase=0.01 , _lowerCamelCase=0.01 , _lowerCamelCase=0.02 , _lowerCamelCase=1e-5 , _lowerCamelCase=0 , **_lowerCamelCase , ): """simple docstring""" UpperCAmelCase__ : Any = vocab_size UpperCAmelCase__ : Dict = [] self.cutoffs.extend(_lowerCamelCase ) if proj_share_all_but_first: UpperCAmelCase__ : Optional[int] = [False] + [True] * len(self.cutoffs ) else: UpperCAmelCase__ : List[Any] = [False] + [False] * len(self.cutoffs ) UpperCAmelCase__ : Dict = d_model UpperCAmelCase__ : Dict = d_embed UpperCAmelCase__ : List[Any] = d_head UpperCAmelCase__ : List[str] = d_inner UpperCAmelCase__ : Any = div_val UpperCAmelCase__ : str = pre_lnorm UpperCAmelCase__ : int = n_layer UpperCAmelCase__ : Optional[Any] = n_head UpperCAmelCase__ : Tuple = mem_len UpperCAmelCase__ : Dict = same_length UpperCAmelCase__ : Union[str, Any] = attn_type UpperCAmelCase__ : Optional[int] = clamp_len UpperCAmelCase__ : str = sample_softmax UpperCAmelCase__ : Any = adaptive UpperCAmelCase__ : List[Any] = dropout UpperCAmelCase__ : List[Any] = dropatt UpperCAmelCase__ : Tuple = untie_r UpperCAmelCase__ : str = init UpperCAmelCase__ : Optional[int] = init_range UpperCAmelCase__ : Tuple = proj_init_std UpperCAmelCase__ : str = init_std UpperCAmelCase__ : List[str] = layer_norm_epsilon super().__init__(eos_token_id=_lowerCamelCase , **_lowerCamelCase ) @property def _a (self ): """simple docstring""" logger.info(F"""The model {self.model_type} is one of the few models that has no sequence length limit.""" ) return -1 @max_position_embeddings.setter def _a (self , _lowerCamelCase ): """simple docstring""" raise NotImplementedError( F"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
166
0
'''simple docstring''' import argparse import json import os import sys import tempfile import unittest from argparse import Namespace from dataclasses import dataclass, field from enum import Enum from pathlib import Path from typing import List, Literal, Optional import yaml from transformers import HfArgumentParser, TrainingArguments from transformers.hf_argparser import make_choice_type_function, string_to_bool # Since Python 3.10, we can use the builtin `|` operator for Union types # See PEP 604: https://peps.python.org/pep-0604 __lowerCAmelCase = sys.version_info >= (3, 10) def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ): return field(default_factory=lambda: default , metadata=_SCREAMING_SNAKE_CASE ) @dataclass class _lowerCAmelCase : '''simple docstring''' lowerCAmelCase_ = 42 lowerCAmelCase_ = 42 lowerCAmelCase_ = 42 lowerCAmelCase_ = 42 @dataclass class _lowerCAmelCase : '''simple docstring''' lowerCAmelCase_ = 42 lowerCAmelCase_ = field(default="toto" , metadata={"help": "help message"} ) @dataclass class _lowerCAmelCase : '''simple docstring''' lowerCAmelCase_ = False lowerCAmelCase_ = True lowerCAmelCase_ = None class _lowerCAmelCase ( __snake_case ): '''simple docstring''' lowerCAmelCase_ = "titi" lowerCAmelCase_ = "toto" class _lowerCAmelCase ( __snake_case ): '''simple docstring''' lowerCAmelCase_ = "titi" lowerCAmelCase_ = "toto" lowerCAmelCase_ = 42 @dataclass class _lowerCAmelCase : '''simple docstring''' lowerCAmelCase_ = "toto" def lowercase (self ) -> int: _snake_case = BasicEnum(self.foo ) @dataclass class _lowerCAmelCase : '''simple docstring''' lowerCAmelCase_ = "toto" def lowercase (self ) -> Optional[int]: _snake_case = MixedTypeEnum(self.foo ) @dataclass class _lowerCAmelCase : '''simple docstring''' lowerCAmelCase_ = None lowerCAmelCase_ = field(default=__snake_case , metadata={"help": "help message"} ) lowerCAmelCase_ = None lowerCAmelCase_ = list_field(default=[] ) lowerCAmelCase_ = list_field(default=[] ) @dataclass class _lowerCAmelCase : '''simple docstring''' lowerCAmelCase_ = list_field(default=[] ) lowerCAmelCase_ = list_field(default=[1, 2, 3] ) lowerCAmelCase_ = list_field(default=["Hallo", "Bonjour", "Hello"] ) lowerCAmelCase_ = list_field(default=[0.1, 0.2, 0.3] ) @dataclass class _lowerCAmelCase : '''simple docstring''' lowerCAmelCase_ = field() lowerCAmelCase_ = field() lowerCAmelCase_ = field() def lowercase (self ) -> Optional[int]: _snake_case = BasicEnum(self.required_enum ) @dataclass class _lowerCAmelCase : '''simple docstring''' lowerCAmelCase_ = 42 lowerCAmelCase_ = field() lowerCAmelCase_ = None lowerCAmelCase_ = field(default="toto" , metadata={"help": "help message"} ) lowerCAmelCase_ = list_field(default=["Hallo", "Bonjour", "Hello"] ) if is_python_no_less_than_3_10: @dataclass class _lowerCAmelCase : '''simple docstring''' lowerCAmelCase_ = False lowerCAmelCase_ = True lowerCAmelCase_ = None @dataclass class _lowerCAmelCase : '''simple docstring''' lowerCAmelCase_ = None lowerCAmelCase_ = field(default=__snake_case , metadata={"help": "help message"} ) lowerCAmelCase_ = None lowerCAmelCase_ = list_field(default=[] ) lowerCAmelCase_ = list_field(default=[] ) class _lowerCAmelCase ( unittest.TestCase ): '''simple docstring''' def lowercase (self , UpperCAmelCase , UpperCAmelCase ) -> Dict: self.assertEqual(len(a._actions ) , len(b._actions ) ) for x, y in zip(a._actions , b._actions ): _snake_case = {k: v for k, v in vars(UpperCAmelCase ).items() if k != """container"""} _snake_case = {k: v for k, v in vars(UpperCAmelCase ).items() if k != """container"""} # Choices with mixed type have custom function as "type" # So we need to compare results directly for equality if xx.get("""choices""" , UpperCAmelCase ) and yy.get("""choices""" , UpperCAmelCase ): for expected_choice in yy["choices"] + xx["choices"]: self.assertEqual(xx["""type"""](UpperCAmelCase ) , yy["""type"""](UpperCAmelCase ) ) del xx["type"], yy["type"] self.assertEqual(UpperCAmelCase , UpperCAmelCase ) def lowercase (self ) -> List[str]: _snake_case = HfArgumentParser(UpperCAmelCase ) _snake_case = argparse.ArgumentParser() expected.add_argument("""--foo""" , type=UpperCAmelCase , required=UpperCAmelCase ) expected.add_argument("""--bar""" , type=UpperCAmelCase , required=UpperCAmelCase ) expected.add_argument("""--baz""" , type=UpperCAmelCase , required=UpperCAmelCase ) expected.add_argument("""--flag""" , type=UpperCAmelCase , default=UpperCAmelCase , const=UpperCAmelCase , nargs="""?""" ) self.argparsersEqual(UpperCAmelCase , UpperCAmelCase ) _snake_case = ["""--foo""", """1""", """--baz""", """quux""", """--bar""", """0.5"""] ((_snake_case), ) = parser.parse_args_into_dataclasses(UpperCAmelCase , look_for_args_file=UpperCAmelCase ) self.assertFalse(example.flag ) def lowercase (self ) -> Tuple: _snake_case = HfArgumentParser(UpperCAmelCase ) _snake_case = argparse.ArgumentParser() expected.add_argument("""--foo""" , default=42 , type=UpperCAmelCase ) expected.add_argument("""--baz""" , default="""toto""" , type=UpperCAmelCase , help="""help message""" ) self.argparsersEqual(UpperCAmelCase , UpperCAmelCase ) def lowercase (self ) -> Optional[int]: _snake_case = argparse.ArgumentParser() expected.add_argument("""--foo""" , type=UpperCAmelCase , default=UpperCAmelCase , const=UpperCAmelCase , nargs="""?""" ) expected.add_argument("""--baz""" , type=UpperCAmelCase , default=UpperCAmelCase , const=UpperCAmelCase , nargs="""?""" ) # A boolean no_* argument always has to come after its "default: True" regular counter-part # and its default must be set to False expected.add_argument("""--no_baz""" , action="""store_false""" , default=UpperCAmelCase , dest="""baz""" ) expected.add_argument("""--opt""" , type=UpperCAmelCase , default=UpperCAmelCase ) _snake_case = [WithDefaultBoolExample] if is_python_no_less_than_3_10: dataclass_types.append(UpperCAmelCase ) for dataclass_type in dataclass_types: _snake_case = HfArgumentParser(UpperCAmelCase ) self.argparsersEqual(UpperCAmelCase , UpperCAmelCase ) _snake_case = parser.parse_args([] ) self.assertEqual(UpperCAmelCase , Namespace(foo=UpperCAmelCase , baz=UpperCAmelCase , opt=UpperCAmelCase ) ) _snake_case = parser.parse_args(["""--foo""", """--no_baz"""] ) self.assertEqual(UpperCAmelCase , Namespace(foo=UpperCAmelCase , baz=UpperCAmelCase , opt=UpperCAmelCase ) ) _snake_case = parser.parse_args(["""--foo""", """--baz"""] ) self.assertEqual(UpperCAmelCase , Namespace(foo=UpperCAmelCase , baz=UpperCAmelCase , opt=UpperCAmelCase ) ) _snake_case = parser.parse_args(["""--foo""", """True""", """--baz""", """True""", """--opt""", """True"""] ) self.assertEqual(UpperCAmelCase , Namespace(foo=UpperCAmelCase , baz=UpperCAmelCase , opt=UpperCAmelCase ) ) _snake_case = parser.parse_args(["""--foo""", """False""", """--baz""", """False""", """--opt""", """False"""] ) self.assertEqual(UpperCAmelCase , Namespace(foo=UpperCAmelCase , baz=UpperCAmelCase , opt=UpperCAmelCase ) ) def lowercase (self ) -> Optional[Any]: _snake_case = HfArgumentParser(UpperCAmelCase ) _snake_case = argparse.ArgumentParser() expected.add_argument( """--foo""" , default="""toto""" , choices=["""titi""", """toto""", 42] , type=make_choice_type_function(["""titi""", """toto""", 42] ) , ) self.argparsersEqual(UpperCAmelCase , UpperCAmelCase ) _snake_case = parser.parse_args([] ) self.assertEqual(args.foo , """toto""" ) _snake_case = parser.parse_args_into_dataclasses([] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.toto ) _snake_case = parser.parse_args(["""--foo""", """titi"""] ) self.assertEqual(args.foo , """titi""" ) _snake_case = parser.parse_args_into_dataclasses(["""--foo""", """titi"""] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.titi ) _snake_case = parser.parse_args(["""--foo""", """42"""] ) self.assertEqual(args.foo , 42 ) _snake_case = parser.parse_args_into_dataclasses(["""--foo""", """42"""] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo ) def lowercase (self ) -> Optional[Any]: @dataclass class _lowerCAmelCase : '''simple docstring''' lowerCAmelCase_ = "toto" _snake_case = HfArgumentParser(UpperCAmelCase ) _snake_case = argparse.ArgumentParser() expected.add_argument( """--foo""" , default="""toto""" , choices=("""titi""", """toto""", 42) , type=make_choice_type_function(["""titi""", """toto""", 42] ) , ) self.argparsersEqual(UpperCAmelCase , UpperCAmelCase ) _snake_case = parser.parse_args([] ) self.assertEqual(args.foo , """toto""" ) _snake_case = parser.parse_args(["""--foo""", """titi"""] ) self.assertEqual(args.foo , """titi""" ) _snake_case = parser.parse_args(["""--foo""", """42"""] ) self.assertEqual(args.foo , 42 ) def lowercase (self ) -> Any: _snake_case = HfArgumentParser(UpperCAmelCase ) _snake_case = argparse.ArgumentParser() expected.add_argument("""--foo_int""" , nargs="""+""" , default=[] , type=UpperCAmelCase ) expected.add_argument("""--bar_int""" , nargs="""+""" , default=[1, 2, 3] , type=UpperCAmelCase ) expected.add_argument("""--foo_str""" , nargs="""+""" , default=["""Hallo""", """Bonjour""", """Hello"""] , type=UpperCAmelCase ) expected.add_argument("""--foo_float""" , nargs="""+""" , default=[0.1, 0.2, 0.3] , type=UpperCAmelCase ) self.argparsersEqual(UpperCAmelCase , UpperCAmelCase ) _snake_case = parser.parse_args([] ) self.assertEqual( UpperCAmelCase , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=["""Hallo""", """Bonjour""", """Hello"""] , foo_float=[0.1, 0.2, 0.3] ) , ) _snake_case = parser.parse_args("""--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7""".split() ) self.assertEqual(UpperCAmelCase , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=["""a""", """b""", """c"""] , foo_float=[0.1, 0.7] ) ) def lowercase (self ) -> List[str]: _snake_case = argparse.ArgumentParser() expected.add_argument("""--foo""" , default=UpperCAmelCase , type=UpperCAmelCase ) expected.add_argument("""--bar""" , default=UpperCAmelCase , type=UpperCAmelCase , help="""help message""" ) expected.add_argument("""--baz""" , default=UpperCAmelCase , type=UpperCAmelCase ) expected.add_argument("""--ces""" , nargs="""+""" , default=[] , type=UpperCAmelCase ) expected.add_argument("""--des""" , nargs="""+""" , default=[] , type=UpperCAmelCase ) _snake_case = [OptionalExample] if is_python_no_less_than_3_10: dataclass_types.append(UpperCAmelCase ) for dataclass_type in dataclass_types: _snake_case = HfArgumentParser(UpperCAmelCase ) self.argparsersEqual(UpperCAmelCase , UpperCAmelCase ) _snake_case = parser.parse_args([] ) self.assertEqual(UpperCAmelCase , Namespace(foo=UpperCAmelCase , bar=UpperCAmelCase , baz=UpperCAmelCase , ces=[] , des=[] ) ) _snake_case = parser.parse_args("""--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3""".split() ) self.assertEqual(UpperCAmelCase , Namespace(foo=12 , bar=3.14 , baz="""42""" , ces=["""a""", """b""", """c"""] , des=[1, 2, 3] ) ) def lowercase (self ) -> Optional[Any]: _snake_case = HfArgumentParser(UpperCAmelCase ) _snake_case = argparse.ArgumentParser() expected.add_argument("""--required_list""" , nargs="""+""" , type=UpperCAmelCase , required=UpperCAmelCase ) expected.add_argument("""--required_str""" , type=UpperCAmelCase , required=UpperCAmelCase ) expected.add_argument( """--required_enum""" , type=make_choice_type_function(["""titi""", """toto"""] ) , choices=["""titi""", """toto"""] , required=UpperCAmelCase , ) self.argparsersEqual(UpperCAmelCase , UpperCAmelCase ) def lowercase (self ) -> int: _snake_case = HfArgumentParser(UpperCAmelCase ) _snake_case = argparse.ArgumentParser() expected.add_argument("""--foo""" , type=UpperCAmelCase , required=UpperCAmelCase ) expected.add_argument( """--required_enum""" , type=make_choice_type_function(["""titi""", """toto"""] ) , choices=["""titi""", """toto"""] , required=UpperCAmelCase , ) expected.add_argument("""--opt""" , type=UpperCAmelCase , default=UpperCAmelCase ) expected.add_argument("""--baz""" , default="""toto""" , type=UpperCAmelCase , help="""help message""" ) expected.add_argument("""--foo_str""" , nargs="""+""" , default=["""Hallo""", """Bonjour""", """Hello"""] , type=UpperCAmelCase ) self.argparsersEqual(UpperCAmelCase , UpperCAmelCase ) def lowercase (self ) -> Optional[Any]: _snake_case = HfArgumentParser(UpperCAmelCase ) _snake_case = { """foo""": 12, """bar""": 3.14, """baz""": """42""", """flag""": True, } _snake_case = parser.parse_dict(UpperCAmelCase )[0] _snake_case = BasicExample(**UpperCAmelCase ) self.assertEqual(UpperCAmelCase , UpperCAmelCase ) def lowercase (self ) -> Tuple: _snake_case = HfArgumentParser(UpperCAmelCase ) _snake_case = { """foo""": 12, """bar""": 3.14, """baz""": """42""", """flag""": True, """extra""": 42, } self.assertRaises(UpperCAmelCase , parser.parse_dict , UpperCAmelCase , allow_extra_keys=UpperCAmelCase ) def lowercase (self ) -> Dict: _snake_case = HfArgumentParser(UpperCAmelCase ) _snake_case = { """foo""": 12, """bar""": 3.14, """baz""": """42""", """flag""": True, } with tempfile.TemporaryDirectory() as tmp_dir: _snake_case = os.path.join(UpperCAmelCase , """temp_json""" ) os.mkdir(UpperCAmelCase ) with open(temp_local_path + """.json""" , """w+""" ) as f: json.dump(UpperCAmelCase , UpperCAmelCase ) _snake_case = parser.parse_yaml_file(Path(temp_local_path + """.json""" ) )[0] _snake_case = BasicExample(**UpperCAmelCase ) self.assertEqual(UpperCAmelCase , UpperCAmelCase ) def lowercase (self ) -> Any: _snake_case = HfArgumentParser(UpperCAmelCase ) _snake_case = { """foo""": 12, """bar""": 3.14, """baz""": """42""", """flag""": True, } with tempfile.TemporaryDirectory() as tmp_dir: _snake_case = os.path.join(UpperCAmelCase , """temp_yaml""" ) os.mkdir(UpperCAmelCase ) with open(temp_local_path + """.yaml""" , """w+""" ) as f: yaml.dump(UpperCAmelCase , UpperCAmelCase ) _snake_case = parser.parse_yaml_file(Path(temp_local_path + """.yaml""" ) )[0] _snake_case = BasicExample(**UpperCAmelCase ) self.assertEqual(UpperCAmelCase , UpperCAmelCase ) def lowercase (self ) -> List[Any]: _snake_case = HfArgumentParser(UpperCAmelCase ) self.assertIsNotNone(UpperCAmelCase )
341
'''simple docstring''' from math import factorial, radians def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 18 , _SCREAMING_SNAKE_CASE = 10 ): _snake_case = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0) # Converting from degrees to radians _snake_case = radians(_SCREAMING_SNAKE_CASE ) _snake_case = angle_in_radians _snake_case = 3 _snake_case = -1 for _ in range(_SCREAMING_SNAKE_CASE ): result += (b * (angle_in_radians**a)) / factorial(_SCREAMING_SNAKE_CASE ) _snake_case = -b # One positive term and the next will be negative and so on... a += 2 # Increased by 2 for every term. return round(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if __name__ == "__main__": __import__('doctest').testmod()
341
1
"""simple docstring""" import unittest from transformers import AutoTokenizer, NystromformerConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( NystromformerForMaskedLM, NystromformerForMultipleChoice, NystromformerForQuestionAnswering, NystromformerForSequenceClassification, NystromformerForTokenClassification, NystromformerModel, ) from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST class snake_case_: def __init__( self : Tuple , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : List[Any]=1_3 , UpperCamelCase_ : List[Any]=7 , UpperCamelCase_ : Optional[Any]=True , UpperCamelCase_ : Dict=True , UpperCamelCase_ : Any=True , UpperCamelCase_ : Union[str, Any]=True , UpperCamelCase_ : Any=9_9 , UpperCamelCase_ : Any=3_2 , UpperCamelCase_ : str=5 , UpperCamelCase_ : Optional[int]=4 , UpperCamelCase_ : Optional[Any]=3_7 , UpperCamelCase_ : Union[str, Any]="gelu" , UpperCamelCase_ : Tuple=0.1 , UpperCamelCase_ : List[str]=0.1 , UpperCamelCase_ : Tuple=5_1_2 , UpperCamelCase_ : str=1_6 , UpperCamelCase_ : Tuple=2 , UpperCamelCase_ : List[str]=0.02 , UpperCamelCase_ : Optional[Any]=3 , UpperCamelCase_ : int=4 , UpperCamelCase_ : Union[str, Any]=None , ): lowerCAmelCase : List[Any] = parent lowerCAmelCase : List[Any] = batch_size lowerCAmelCase : Optional[Any] = seq_length lowerCAmelCase : str = is_training lowerCAmelCase : List[str] = use_input_mask lowerCAmelCase : Optional[int] = use_token_type_ids lowerCAmelCase : List[str] = use_labels lowerCAmelCase : Optional[int] = vocab_size lowerCAmelCase : Union[str, Any] = hidden_size lowerCAmelCase : Any = num_hidden_layers lowerCAmelCase : str = num_attention_heads lowerCAmelCase : Union[str, Any] = intermediate_size lowerCAmelCase : Any = hidden_act lowerCAmelCase : Union[str, Any] = hidden_dropout_prob lowerCAmelCase : Optional[Any] = attention_probs_dropout_prob lowerCAmelCase : List[str] = max_position_embeddings lowerCAmelCase : Any = type_vocab_size lowerCAmelCase : List[str] = type_sequence_label_size lowerCAmelCase : int = initializer_range lowerCAmelCase : Dict = num_labels lowerCAmelCase : Any = num_choices lowerCAmelCase : Union[str, Any] = scope def lowerCamelCase__ ( self : Any ): lowerCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCAmelCase : List[Any] = None if self.use_input_mask: lowerCAmelCase : str = random_attention_mask([self.batch_size, self.seq_length] ) lowerCAmelCase : str = None if self.use_token_type_ids: lowerCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowerCAmelCase : Union[str, Any] = None lowerCAmelCase : List[str] = None lowerCAmelCase : Union[str, Any] = None if self.use_labels: lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices ) lowerCAmelCase : Any = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCamelCase__ ( self : int ): return NystromformerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase_ , initializer_range=self.initializer_range , ) def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : Any , UpperCamelCase_ : Any , UpperCamelCase_ : int , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : int , UpperCamelCase_ : Optional[Any] ): lowerCAmelCase : Optional[Any] = NystromformerModel(config=UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() lowerCAmelCase : Dict = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ ) lowerCAmelCase : Optional[int] = model(UpperCamelCase_ , token_type_ids=UpperCamelCase_ ) lowerCAmelCase : Dict = model(UpperCamelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCamelCase__ ( self : Dict , UpperCamelCase_ : Tuple , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Any , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : int , UpperCamelCase_ : List[str] ): lowerCAmelCase : List[str] = NystromformerForMaskedLM(config=UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() lowerCAmelCase : str = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCamelCase__ ( self : int , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : str , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Dict , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : List[str] ): lowerCAmelCase : List[str] = NystromformerForQuestionAnswering(config=UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() lowerCAmelCase : Optional[Any] = model( UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , start_positions=UpperCamelCase_ , end_positions=UpperCamelCase_ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCamelCase__ ( self : List[str] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : List[str] , UpperCamelCase_ : str , UpperCamelCase_ : Tuple , UpperCamelCase_ : Any , UpperCamelCase_ : List[Any] ): lowerCAmelCase : List[Any] = self.num_labels lowerCAmelCase : Optional[Any] = NystromformerForSequenceClassification(UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() lowerCAmelCase : Dict = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase__ ( self : int , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Any , UpperCamelCase_ : Dict , UpperCamelCase_ : Dict , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Any , UpperCamelCase_ : Tuple ): lowerCAmelCase : Union[str, Any] = self.num_labels lowerCAmelCase : Dict = NystromformerForTokenClassification(config=UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() lowerCAmelCase : Union[str, Any] = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCamelCase__ ( self : Dict , UpperCamelCase_ : int , UpperCamelCase_ : str , UpperCamelCase_ : str , UpperCamelCase_ : int , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : str , UpperCamelCase_ : Any ): lowerCAmelCase : Tuple = self.num_choices lowerCAmelCase : Tuple = NystromformerForMultipleChoice(config=UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() lowerCAmelCase : Any = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowerCAmelCase : Optional[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowerCAmelCase : int = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowerCAmelCase : int = model( UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCamelCase__ ( self : Optional[int] ): lowerCAmelCase : str = self.prepare_config_and_inputs() ( ( lowerCAmelCase ), ( lowerCAmelCase ), ( lowerCAmelCase ), ( lowerCAmelCase ), ( lowerCAmelCase ), ( lowerCAmelCase ), ( lowerCAmelCase ), ) : Any = config_and_inputs lowerCAmelCase : List[str] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class snake_case_( a__ , a__ , unittest.TestCase ): __UpperCamelCase = ( ( NystromformerModel, NystromformerForMaskedLM, NystromformerForMultipleChoice, NystromformerForQuestionAnswering, NystromformerForSequenceClassification, NystromformerForTokenClassification, ) if is_torch_available() else () ) __UpperCamelCase = ( { '''feature-extraction''': NystromformerModel, '''fill-mask''': NystromformerForMaskedLM, '''question-answering''': NystromformerForQuestionAnswering, '''text-classification''': NystromformerForSequenceClassification, '''token-classification''': NystromformerForTokenClassification, '''zero-shot''': NystromformerForSequenceClassification, } if is_torch_available() else {} ) __UpperCamelCase = False __UpperCamelCase = False def lowerCamelCase__ ( self : List[Any] ): lowerCAmelCase : Dict = NystromformerModelTester(self ) lowerCAmelCase : Optional[Any] = ConfigTester(self , config_class=UpperCamelCase_ , hidden_size=3_7 ) def lowerCamelCase__ ( self : List[str] ): self.config_tester.run_common_tests() def lowerCamelCase__ ( self : str ): lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase_ ) def lowerCamelCase__ ( self : Dict ): lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: lowerCAmelCase : Optional[int] = type self.model_tester.create_and_check_model(*UpperCamelCase_ ) def lowerCamelCase__ ( self : Optional[Any] ): lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase_ ) def lowerCamelCase__ ( self : Optional[Any] ): lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*UpperCamelCase_ ) def lowerCamelCase__ ( self : List[str] ): lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*UpperCamelCase_ ) def lowerCamelCase__ ( self : Any ): lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*UpperCamelCase_ ) def lowerCamelCase__ ( self : Tuple ): lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*UpperCamelCase_ ) @slow def lowerCamelCase__ ( self : Optional[int] ): for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCAmelCase : Union[str, Any] = NystromformerModel.from_pretrained(UpperCamelCase_ ) self.assertIsNotNone(UpperCamelCase_ ) @require_torch class snake_case_( unittest.TestCase ): @slow def lowerCamelCase__ ( self : Dict ): lowerCAmelCase : Optional[Any] = NystromformerModel.from_pretrained('''uw-madison/nystromformer-512''' ) lowerCAmelCase : Optional[Any] = torch.tensor([[0, 1, 2, 3, 4, 5]] ) with torch.no_grad(): lowerCAmelCase : str = model(UpperCamelCase_ )[0] lowerCAmelCase : Any = torch.Size((1, 6, 7_6_8) ) self.assertEqual(output.shape , UpperCamelCase_ ) lowerCAmelCase : Optional[int] = torch.tensor( [[[-0.4_532, -0.0_936, 0.5_137], [-0.2_676, 0.0_628, 0.6_186], [-0.3_629, -0.1_726, 0.4_716]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCamelCase_ , atol=1E-4 ) ) @slow def lowerCamelCase__ ( self : List[str] ): lowerCAmelCase : int = '''the [MASK] of Belgium is Brussels''' lowerCAmelCase : Tuple = AutoTokenizer.from_pretrained('''uw-madison/nystromformer-512''' ) lowerCAmelCase : str = NystromformerForMaskedLM.from_pretrained('''uw-madison/nystromformer-512''' ) lowerCAmelCase : List[Any] = tokenizer(UpperCamelCase_ , return_tensors='''pt''' ) with torch.no_grad(): lowerCAmelCase : str = model(encoding.input_ids ).logits lowerCAmelCase : Optional[Any] = token_logits[:, 2, :].argmax(-1 )[0] self.assertEqual(tokenizer.decode(UpperCamelCase_ ) , '''capital''' )
314
"""simple docstring""" import unittest from queue import Empty from threading import Thread from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available from transformers.testing_utils import CaptureStdout, require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers import AutoModelForCausalLM @require_torch class snake_case_( unittest.TestCase ): def lowerCamelCase__ ( self : int ): lowerCAmelCase : str = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) lowerCAmelCase : str = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(UpperCamelCase_ ) lowerCAmelCase : int = -1 lowerCAmelCase : str = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(UpperCamelCase_ ) lowerCAmelCase : List[Any] = model.generate(UpperCamelCase_ , max_new_tokens=1_0 , do_sample=UpperCamelCase_ ) lowerCAmelCase : Any = tokenizer.decode(greedy_ids[0] ) with CaptureStdout() as cs: lowerCAmelCase : str = TextStreamer(UpperCamelCase_ ) model.generate(UpperCamelCase_ , max_new_tokens=1_0 , do_sample=UpperCamelCase_ , streamer=UpperCamelCase_ ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer lowerCAmelCase : str = cs.out[:-1] self.assertEqual(UpperCamelCase_ , UpperCamelCase_ ) def lowerCamelCase__ ( self : Tuple ): lowerCAmelCase : int = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) lowerCAmelCase : Any = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(UpperCamelCase_ ) lowerCAmelCase : Any = -1 lowerCAmelCase : Union[str, Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(UpperCamelCase_ ) lowerCAmelCase : Any = model.generate(UpperCamelCase_ , max_new_tokens=1_0 , do_sample=UpperCamelCase_ ) lowerCAmelCase : Tuple = tokenizer.decode(greedy_ids[0] ) lowerCAmelCase : Dict = TextIteratorStreamer(UpperCamelCase_ ) lowerCAmelCase : str = {'''input_ids''': input_ids, '''max_new_tokens''': 1_0, '''do_sample''': False, '''streamer''': streamer} lowerCAmelCase : str = Thread(target=model.generate , kwargs=UpperCamelCase_ ) thread.start() lowerCAmelCase : Optional[Any] = '''''' for new_text in streamer: streamer_text += new_text self.assertEqual(UpperCamelCase_ , UpperCamelCase_ ) def lowerCamelCase__ ( self : str ): lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) lowerCAmelCase : Optional[int] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(UpperCamelCase_ ) lowerCAmelCase : Tuple = -1 lowerCAmelCase : Dict = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(UpperCamelCase_ ) lowerCAmelCase : List[Any] = model.generate(UpperCamelCase_ , max_new_tokens=1_0 , do_sample=UpperCamelCase_ ) lowerCAmelCase : Any = greedy_ids[:, input_ids.shape[1] :] lowerCAmelCase : Optional[int] = tokenizer.decode(new_greedy_ids[0] ) with CaptureStdout() as cs: lowerCAmelCase : Tuple = TextStreamer(UpperCamelCase_ , skip_prompt=UpperCamelCase_ ) model.generate(UpperCamelCase_ , max_new_tokens=1_0 , do_sample=UpperCamelCase_ , streamer=UpperCamelCase_ ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer lowerCAmelCase : str = cs.out[:-1] self.assertEqual(UpperCamelCase_ , UpperCamelCase_ ) def lowerCamelCase__ ( self : List[Any] ): # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested # with actual models -- the dummy models' tokenizers are not aligned with their models, and # `skip_special_tokens=True` has no effect on them lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained('''distilgpt2''' ) lowerCAmelCase : int = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(UpperCamelCase_ ) lowerCAmelCase : Union[str, Any] = -1 lowerCAmelCase : Tuple = torch.ones((1, 5) , device=UpperCamelCase_ ).long() * model.config.bos_token_id with CaptureStdout() as cs: lowerCAmelCase : Any = TextStreamer(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ ) model.generate(UpperCamelCase_ , max_new_tokens=1 , do_sample=UpperCamelCase_ , streamer=UpperCamelCase_ ) # The prompt contains a special token, so the streamer should not print it. As such, the output text, when # re-tokenized, must only contain one token lowerCAmelCase : Any = cs.out[:-1] # Remove the final "\n" lowerCAmelCase : Tuple = tokenizer(UpperCamelCase_ , return_tensors='''pt''' ) self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) ) def lowerCamelCase__ ( self : Dict ): lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) lowerCAmelCase : Optional[Any] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(UpperCamelCase_ ) lowerCAmelCase : str = -1 lowerCAmelCase : Union[str, Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(UpperCamelCase_ ) lowerCAmelCase : Optional[int] = TextIteratorStreamer(UpperCamelCase_ , timeout=0.001 ) lowerCAmelCase : str = {'''input_ids''': input_ids, '''max_new_tokens''': 1_0, '''do_sample''': False, '''streamer''': streamer} lowerCAmelCase : Optional[int] = Thread(target=model.generate , kwargs=UpperCamelCase_ ) thread.start() # The streamer will timeout after 0.001 seconds, so an exception will be raised with self.assertRaises(UpperCamelCase_ ): lowerCAmelCase : List[str] = '''''' for new_text in streamer: streamer_text += new_text
314
1
"""simple docstring""" def SCREAMING_SNAKE_CASE ( _lowerCamelCase : list ,_lowerCamelCase : list ,_lowerCamelCase : int ) -> list: _lowerCAmelCase : List[str] = len(_lowerCamelCase ) _lowerCAmelCase : Dict = [[0] * n for i in range(_lowerCamelCase )] for i in range(_lowerCamelCase ): _lowerCAmelCase : int = y_points[i] for i in range(2 ,_lowerCamelCase ): for j in range(_lowerCamelCase ,_lowerCamelCase ): _lowerCAmelCase : Dict = ( (xa - x_points[j - i + 1]) * q[j][i - 1] - (xa - x_points[j]) * q[j - 1][i - 1] ) / (x_points[j] - x_points[j - i + 1]) return [q[n - 1][n - 1], q] if __name__ == "__main__": import doctest doctest.testmod()
44
"""simple docstring""" import json import multiprocessing as mp import re from collections import defaultdict from functools import partial from typing import Dict, List, Optional, Set, Tuple, Type from datasets import Dataset from datasketch import MinHash, MinHashLSH from dpu_utils.utils.iterators import ThreadedIterator from tqdm import tqdm _a : Union[str, Any] = re.compile('[^A-Za-z_0-9]') # parameters used in DuplicationIndex _a : List[str] = 10 _a : List[Any] = 256 def SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[str] ) -> Optional[MinHash]: if len(_lowerCamelCase ) < MIN_NUM_TOKENS: return None _lowerCAmelCase : Optional[Any] = MinHash(num_perm=_lowerCamelCase ) for token in set(_lowerCamelCase ): min_hash.update(token.encode() ) return min_hash def SCREAMING_SNAKE_CASE ( _lowerCamelCase : str ) -> Set[str]: return {t for t in NON_ALPHA.split(_lowerCamelCase ) if len(t.strip() ) > 0} class __A : def __init__( self , *, a__ = 0.8_5 , ): _lowerCAmelCase : List[Any] = duplication_jaccard_threshold _lowerCAmelCase : Union[str, Any] = NUM_PERM _lowerCAmelCase : Optional[int] = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm ) _lowerCAmelCase : Optional[int] = defaultdict(a__ ) def __A ( self , a__ , a__ ): _lowerCAmelCase : Optional[Any] = self._index.query(a__ ) if code_key in self._index.keys: print(F"Duplicate key {code_key}" ) return self._index.insert(a__ , a__ ) if len(a__ ) > 0: for base_duplicate in close_duplicates: if base_duplicate in self._duplicate_clusters: self._duplicate_clusters[base_duplicate].add(a__ ) break else: self._duplicate_clusters[close_duplicates[0]].add(a__ ) def __A ( self ): _lowerCAmelCase : int = [] for base, duplicates in self._duplicate_clusters.items(): _lowerCAmelCase : List[str] = [base] + list(a__ ) # reformat the cluster to be a list of dict _lowerCAmelCase : List[Any] = [{"""base_index""": el[0], """repo_name""": el[1], """path""": el[2]} for el in cluster] duplicate_clusters.append(a__ ) return duplicate_clusters def __A ( self , a__ ): _lowerCAmelCase : Dict = self.get_duplicate_clusters() with open(a__ , """w""" ) as f: json.dump(a__ , a__ ) def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[Any] ) -> Tuple: _lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = element _lowerCAmelCase : Tuple = get_min_hash([t for t in NON_ALPHA.split(data["""content"""] ) if len(t.strip() ) > 0] ) if min_hash is not None: return (index, data["repo_name"], data["path"]), min_hash def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Type[Dataset] ) -> Optional[Any]: with mp.Pool() as pool: for data in pool.imap_unordered( _compute_min_hash ,ThreadedIterator(_lowerCamelCase ,max_queue_size=10000 ) ,chunksize=100 ,): if data is not None: yield data def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Type[Dataset] ,_lowerCamelCase : float ) -> List[str]: _lowerCAmelCase : Optional[Any] = DuplicationIndex(duplication_jaccard_threshold=_lowerCamelCase ) for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(_lowerCamelCase ) ) ,max_queue_size=100 ) ): di.add(_lowerCamelCase ,_lowerCamelCase ) # Returns a List[Cluster] where Cluster is List[str] with the filenames. return di.get_duplicate_clusters() def SCREAMING_SNAKE_CASE ( _lowerCamelCase : str ,_lowerCamelCase : str ) -> float: _lowerCAmelCase : Any = get_tokens(_lowerCamelCase ) _lowerCAmelCase : Optional[int] = get_tokens(_lowerCamelCase ) return len(tokensa & tokensa ) / len(tokensa | tokensa ) _a : str = None def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int ,_lowerCamelCase : List[Any] ) -> Dict: _lowerCAmelCase : int = [] for elementa in cluster: _lowerCAmelCase : Dict = _shared_dataset[elementa["""base_index"""]]["""content"""] for elementa in extremes: _lowerCAmelCase : Any = _shared_dataset[elementa["""base_index"""]]["""content"""] if jaccard_similarity(_lowerCamelCase ,_lowerCamelCase ) >= jaccard_threshold: elementa["copies"] += 1 break else: _lowerCAmelCase : Any = 1 extremes.append(_lowerCamelCase ) return extremes def SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[Any] ,_lowerCamelCase : Optional[Any] ,_lowerCamelCase : Any ) -> str: global _shared_dataset _lowerCAmelCase : Tuple = dataset _lowerCAmelCase : Optional[Any] = [] _lowerCAmelCase : Optional[Any] = partial(_find_cluster_extremes_shared ,jaccard_threshold=_lowerCamelCase ) with mp.Pool() as pool: for extremes in tqdm( pool.imap_unordered( _lowerCamelCase ,_lowerCamelCase ,) ,total=len(_lowerCamelCase ) ,): extremes_list.append(_lowerCamelCase ) return extremes_list def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Type[Dataset] ,_lowerCamelCase : float = 0.85 ) -> Tuple[Type[Dataset], List[List[Dict]]]: _lowerCAmelCase : Tuple = make_duplicate_clusters(_lowerCamelCase ,_lowerCamelCase ) _lowerCAmelCase : str = {x["""base_index"""] for cluster in duplicate_clusters for x in cluster} _lowerCAmelCase : Optional[int] = {} _lowerCAmelCase : Tuple = find_extremes(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ) for extremes in extremes_clusters: for element in extremes: _lowerCAmelCase : Union[str, Any] = element _lowerCAmelCase : List[Any] = duplicate_indices - set(extreme_dict.keys() ) _lowerCAmelCase : List[Any] = dataset.filter(lambda _lowerCamelCase ,_lowerCamelCase : idx not in remove_indices ,with_indices=_lowerCamelCase ) # update duplicate_clusters for cluster in duplicate_clusters: for element in cluster: _lowerCAmelCase : Tuple = element["""base_index"""] in extreme_dict if element["is_extreme"]: _lowerCAmelCase : Dict = extreme_dict[element["""base_index"""]]["""copies"""] print(f"Original dataset size: {len(_lowerCamelCase )}" ) print(f"Number of duplicate clusters: {len(_lowerCamelCase )}" ) print(f"Files in duplicate cluster: {len(_lowerCamelCase )}" ) print(f"Unique files in duplicate cluster: {len(_lowerCamelCase )}" ) print(f"Filtered dataset size: {len(_lowerCamelCase )}" ) return ds_filter, duplicate_clusters
44
1
"""simple docstring""" def __a ( _SCREAMING_SNAKE_CASE = 1000000 ) ->int: a__: Any = limit + 1 a__: List[str] = [0] * limit for first_term in range(1 , _SCREAMING_SNAKE_CASE ): for n in range(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): a__: Any = first_term + n / first_term if common_difference % 4: # d must be divisble by 4 continue else: common_difference /= 4 if ( first_term > common_difference and first_term < 4 * common_difference ): # since x,y,z are positive integers frequency[n] += 1 # so z>0 and a>d ,also 4d<a a__: Optional[int] = sum(1 for x in frequency[1:limit] if x == 10 ) return count if __name__ == "__main__": print(f"{solution() = }")
203
"""simple docstring""" from __future__ import annotations class __snake_case : def __init__( self , lowercase=None) -> Optional[Any]: '''simple docstring''' a__: int = data a__: str = None def __repr__( self) -> List[str]: '''simple docstring''' a__: Optional[Any] = [] a__: Union[str, Any] = self while temp: string_rep.append(f'{temp.data}') a__: Tuple = temp.next return "->".join(lowercase) def __a ( _SCREAMING_SNAKE_CASE ) ->str: if not elements_list: raise Exception('The Elements List is empty' ) a__: Any = Node(elements_list[0] ) for i in range(1 , len(_SCREAMING_SNAKE_CASE ) ): a__: Optional[Any] = Node(elements_list[i] ) a__: Tuple = current.next return head def __a ( _SCREAMING_SNAKE_CASE ) ->None: if head_node is not None and isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): print_reverse(head_node.next ) print(head_node.data ) def __a ( ) ->Optional[Any]: from doctest import testmod testmod() a__: Tuple = make_linked_list([14, 52, 14, 12, 43] ) print('Linked List:' ) print(_SCREAMING_SNAKE_CASE ) print('Elements in Reverse:' ) print_reverse(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": main()
203
1
"""simple docstring""" def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ): while second != 0: __lowercase : Tuple = first & second first ^= second __lowercase : Dict = c << 1 return first if __name__ == "__main__": import doctest doctest.testmod() a_ = int(input('Enter the first number: ').strip()) a_ = int(input('Enter the second number: ').strip()) print(F"{add(first, second) = }")
249
import doctest from collections import deque import numpy as np class A_ : '''simple docstring''' def __init__(self ) -> None: __UpperCAmelCase = [2, 1, 2, -1] __UpperCAmelCase = [1, 2, 3, 4] def lowerCAmelCase_ (self ) -> list[float]: __UpperCAmelCase = len(self.first_signal ) __UpperCAmelCase = len(self.second_signal ) __UpperCAmelCase = max(lowercase__ , lowercase__ ) # create a zero matrix of max_length x max_length __UpperCAmelCase = [[0] * max_length for i in range(lowercase__ )] # fills the smaller signal with zeros to make both signals of same length if length_first_signal < length_second_signal: self.first_signal += [0] * (max_length - length_first_signal) elif length_first_signal > length_second_signal: self.second_signal += [0] * (max_length - length_second_signal) for i in range(lowercase__ ): __UpperCAmelCase = deque(self.second_signal ) rotated_signal.rotate(lowercase__ ) for j, item in enumerate(lowercase__ ): matrix[i][j] += item # multiply the matrix with the first signal __UpperCAmelCase = np.matmul(np.transpose(lowercase__ ) , np.transpose(self.first_signal ) ) # rounding-off to two decimal places return [round(lowercase__ , 2 ) for i in final_signal] if __name__ == "__main__": doctest.testmod()
333
0
lowerCAmelCase : List[Any] = tuple[float, float, float] lowerCAmelCase : Optional[Any] = tuple[float, float, float] def A_ ( _UpperCAmelCase , _UpperCAmelCase ): SCREAMING_SNAKE_CASE_: Any = end_pointa[0] - end_pointa[0] SCREAMING_SNAKE_CASE_: str = end_pointa[1] - end_pointa[1] SCREAMING_SNAKE_CASE_: List[str] = end_pointa[2] - end_pointa[2] return (x, y, z) def A_ ( _UpperCAmelCase , _UpperCAmelCase ): SCREAMING_SNAKE_CASE_: Any = ab[1] * ac[2] - ab[2] * ac[1] # *i SCREAMING_SNAKE_CASE_: Dict = (ab[0] * ac[2] - ab[2] * ac[0]) * -1 # *j SCREAMING_SNAKE_CASE_: Tuple = ab[0] * ac[1] - ab[1] * ac[0] # *k return (x, y, z) def A_ ( _UpperCAmelCase , _UpperCAmelCase ): return tuple(round(_UpperCAmelCase , _UpperCAmelCase ) for x in vector ) == (0, 0, 0) def A_ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = 10 ): SCREAMING_SNAKE_CASE_: Any = create_vector(_UpperCAmelCase , _UpperCAmelCase ) SCREAMING_SNAKE_CASE_: Any = create_vector(_UpperCAmelCase , _UpperCAmelCase ) return is_zero_vector(get_ad_vectors_cross(_UpperCAmelCase , _UpperCAmelCase ) , _UpperCAmelCase )
127
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_convbert import ConvBertTokenizer lowerCAmelCase : Any = logging.get_logger(__name__) lowerCAmelCase : Dict = {"""vocab_file""": """vocab.txt"""} lowerCAmelCase : List[str] = { """vocab_file""": { """YituTech/conv-bert-base""": """https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt""", """YituTech/conv-bert-medium-small""": ( """https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt""" ), """YituTech/conv-bert-small""": """https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt""", } } lowerCAmelCase : List[Any] = { """YituTech/conv-bert-base""": 512, """YituTech/conv-bert-medium-small""": 512, """YituTech/conv-bert-small""": 512, } lowerCAmelCase : Tuple = { """YituTech/conv-bert-base""": {"""do_lower_case""": True}, """YituTech/conv-bert-medium-small""": {"""do_lower_case""": True}, """YituTech/conv-bert-small""": {"""do_lower_case""": True}, } class __lowercase ( UpperCAmelCase_ ): """simple docstring""" _UpperCAmelCase : str = VOCAB_FILES_NAMES _UpperCAmelCase : List[str] = PRETRAINED_VOCAB_FILES_MAP _UpperCAmelCase : List[Any] = PRETRAINED_INIT_CONFIGURATION _UpperCAmelCase : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCAmelCase : Dict = ConvBertTokenizer def __init__( self : Optional[Any] , lowerCAmelCase__ : Optional[int]=None , lowerCAmelCase__ : List[str]=None , lowerCAmelCase__ : str=True , lowerCAmelCase__ : Any="[UNK]" , lowerCAmelCase__ : Optional[Any]="[SEP]" , lowerCAmelCase__ : Any="[PAD]" , lowerCAmelCase__ : Dict="[CLS]" , lowerCAmelCase__ : Dict="[MASK]" , lowerCAmelCase__ : Tuple=True , lowerCAmelCase__ : str=None , **lowerCAmelCase__ : Dict , ): super().__init__( lowerCAmelCase__ , tokenizer_file=lowerCAmelCase__ , do_lower_case=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , tokenize_chinese_chars=lowerCAmelCase__ , strip_accents=lowerCAmelCase__ , **lowerCAmelCase__ , ) SCREAMING_SNAKE_CASE_: List[str] = json.loads(self.backend_tokenizer.normalizer.__getstate__()) if ( normalizer_state.get("lowercase" , lowerCAmelCase__) != do_lower_case or normalizer_state.get("strip_accents" , lowerCAmelCase__) != strip_accents or normalizer_state.get("handle_chinese_chars" , lowerCAmelCase__) != tokenize_chinese_chars ): SCREAMING_SNAKE_CASE_: Optional[int] = getattr(lowerCAmelCase__ , normalizer_state.pop("type")) SCREAMING_SNAKE_CASE_: Optional[Any] = do_lower_case SCREAMING_SNAKE_CASE_: List[str] = strip_accents SCREAMING_SNAKE_CASE_: Optional[Any] = tokenize_chinese_chars SCREAMING_SNAKE_CASE_: Optional[int] = normalizer_class(**lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Optional[int] = do_lower_case def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Union[str, Any]=None): SCREAMING_SNAKE_CASE_: List[str] = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None): SCREAMING_SNAKE_CASE_: Optional[int] = [self.sep_token_id] SCREAMING_SNAKE_CASE_: int = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1] def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[str] = None): SCREAMING_SNAKE_CASE_: Any = self._tokenizer.model.save(lowerCAmelCase__ , name=lowerCAmelCase__) return tuple(lowerCAmelCase__)
127
1
'''simple docstring''' import argparse from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline if __name__ == "__main__": lowerCAmelCase__ = argparse.ArgumentParser() parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''') parser.add_argument( '''--txt2img_unclip''', default='''kakaobrain/karlo-v1-alpha''', type=str, required=False, help='''The pretrained txt2img unclip.''', ) lowerCAmelCase__ = parser.parse_args() lowerCAmelCase__ = UnCLIPPipeline.from_pretrained(args.txtaimg_unclip) lowerCAmelCase__ = CLIPImageProcessor() lowerCAmelCase__ = CLIPVisionModelWithProjection.from_pretrained('''openai/clip-vit-large-patch14''') lowerCAmelCase__ = UnCLIPImageVariationPipeline( decoder=txtaimg.decoder, text_encoder=txtaimg.text_encoder, tokenizer=txtaimg.tokenizer, text_proj=txtaimg.text_proj, feature_extractor=feature_extractor, image_encoder=image_encoder, super_res_first=txtaimg.super_res_first, super_res_last=txtaimg.super_res_last, decoder_scheduler=txtaimg.decoder_scheduler, super_res_scheduler=txtaimg.super_res_scheduler, ) imgaimg.save_pretrained(args.dump_path)
104
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, is_vision_available, ) a__ : int = { 'configuration_layoutlmv3': [ 'LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LayoutLMv3Config', 'LayoutLMv3OnnxConfig', ], 'processing_layoutlmv3': ['LayoutLMv3Processor'], 'tokenization_layoutlmv3': ['LayoutLMv3Tokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : Dict = ['LayoutLMv3TokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : Any = [ 'LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST', 'LayoutLMv3ForQuestionAnswering', 'LayoutLMv3ForSequenceClassification', 'LayoutLMv3ForTokenClassification', 'LayoutLMv3Model', 'LayoutLMv3PreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : str = [ 'TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFLayoutLMv3ForQuestionAnswering', 'TFLayoutLMv3ForSequenceClassification', 'TFLayoutLMv3ForTokenClassification', 'TFLayoutLMv3Model', 'TFLayoutLMv3PreTrainedModel', ] try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : List[Any] = ['LayoutLMv3FeatureExtractor'] a__ : str = ['LayoutLMv3ImageProcessor'] if TYPE_CHECKING: from .configuration_layoutlmva import ( LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig, LayoutLMvaOnnxConfig, ) from .processing_layoutlmva import LayoutLMvaProcessor from .tokenization_layoutlmva import LayoutLMvaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_layoutlmva import ( LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST, LayoutLMvaForQuestionAnswering, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaModel, LayoutLMvaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_layoutlmva import ( TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST, TFLayoutLMvaForQuestionAnswering, TFLayoutLMvaForSequenceClassification, TFLayoutLMvaForTokenClassification, TFLayoutLMvaModel, TFLayoutLMvaPreTrainedModel, ) try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor from .image_processing_layoutlmva import LayoutLMvaImageProcessor else: import sys a__ : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
349
0
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from .tokenization_lxmert import LxmertTokenizer __snake_case = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""} __snake_case = { """vocab_file""": { """unc-nlp/lxmert-base-uncased""": """https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt""", }, """tokenizer_file""": { """unc-nlp/lxmert-base-uncased""": ( """https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json""" ), }, } __snake_case = { """unc-nlp/lxmert-base-uncased""": 5_12, } __snake_case = { """unc-nlp/lxmert-base-uncased""": {"""do_lower_case""": True}, } class lowercase__ ( __UpperCamelCase ): A__ : List[Any] =VOCAB_FILES_NAMES A__ : Optional[Any] =PRETRAINED_VOCAB_FILES_MAP A__ : Optional[int] =PRETRAINED_INIT_CONFIGURATION A__ : Union[str, Any] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A__ : List[str] =LxmertTokenizer def __init__( self : int , UpperCAmelCase_ : List[Any]=None , UpperCAmelCase_ : List[Any]=None , UpperCAmelCase_ : int=True , UpperCAmelCase_ : Optional[int]="[UNK]" , UpperCAmelCase_ : str="[SEP]" , UpperCAmelCase_ : List[str]="[PAD]" , UpperCAmelCase_ : Optional[Any]="[CLS]" , UpperCAmelCase_ : List[str]="[MASK]" , UpperCAmelCase_ : Optional[Any]=True , UpperCAmelCase_ : Any=None , **UpperCAmelCase_ : Optional[int] , ): super().__init__( _lowerCAmelCase , tokenizer_file=_lowerCAmelCase , do_lower_case=_lowerCAmelCase , unk_token=_lowerCAmelCase , sep_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , cls_token=_lowerCAmelCase , mask_token=_lowerCAmelCase , tokenize_chinese_chars=_lowerCAmelCase , strip_accents=_lowerCAmelCase , **_lowerCAmelCase , ) SCREAMING_SNAKE_CASE__ = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('lowercase' , _lowerCAmelCase ) != do_lower_case or normalizer_state.get('strip_accents' , _lowerCAmelCase ) != strip_accents or normalizer_state.get('handle_chinese_chars' , _lowerCAmelCase ) != tokenize_chinese_chars ): SCREAMING_SNAKE_CASE__ = getattr(_lowerCAmelCase , normalizer_state.pop('type' ) ) SCREAMING_SNAKE_CASE__ = do_lower_case SCREAMING_SNAKE_CASE__ = strip_accents SCREAMING_SNAKE_CASE__ = tokenize_chinese_chars SCREAMING_SNAKE_CASE__ = normalizer_class(**_lowerCAmelCase ) SCREAMING_SNAKE_CASE__ = do_lower_case def A_ ( self : Optional[Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Any=None ): SCREAMING_SNAKE_CASE__ = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def A_ ( self : Optional[int] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None ): SCREAMING_SNAKE_CASE__ = [self.sep_token_id] SCREAMING_SNAKE_CASE__ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def A_ ( self : Any , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None ): SCREAMING_SNAKE_CASE__ = self._tokenizer.model.save(_lowerCAmelCase , name=_lowerCAmelCase ) return tuple(_lowerCAmelCase )
370
import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST, OpenAIGPTConfig, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification, OpenAIGPTLMHeadModel, OpenAIGPTModel, ) class lowercase__ : def __init__( self : Dict , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Dict=13 , UpperCAmelCase_ : Optional[Any]=7 , UpperCAmelCase_ : Any=True , UpperCAmelCase_ : List[str]=True , UpperCAmelCase_ : List[str]=True , UpperCAmelCase_ : int=99 , UpperCAmelCase_ : Tuple=32 , UpperCAmelCase_ : int=5 , UpperCAmelCase_ : Tuple=4 , UpperCAmelCase_ : Tuple=37 , UpperCAmelCase_ : Optional[Any]="gelu" , UpperCAmelCase_ : Tuple=0.1 , UpperCAmelCase_ : Tuple=0.1 , UpperCAmelCase_ : Optional[Any]=512 , UpperCAmelCase_ : List[str]=16 , UpperCAmelCase_ : Any=2 , UpperCAmelCase_ : Dict=0.02 , UpperCAmelCase_ : Optional[int]=3 , UpperCAmelCase_ : Any=4 , UpperCAmelCase_ : Tuple=None , ): SCREAMING_SNAKE_CASE__ = parent SCREAMING_SNAKE_CASE__ = batch_size SCREAMING_SNAKE_CASE__ = seq_length SCREAMING_SNAKE_CASE__ = is_training SCREAMING_SNAKE_CASE__ = use_token_type_ids SCREAMING_SNAKE_CASE__ = use_labels SCREAMING_SNAKE_CASE__ = vocab_size SCREAMING_SNAKE_CASE__ = hidden_size SCREAMING_SNAKE_CASE__ = num_hidden_layers SCREAMING_SNAKE_CASE__ = num_attention_heads SCREAMING_SNAKE_CASE__ = intermediate_size SCREAMING_SNAKE_CASE__ = hidden_act SCREAMING_SNAKE_CASE__ = hidden_dropout_prob SCREAMING_SNAKE_CASE__ = attention_probs_dropout_prob SCREAMING_SNAKE_CASE__ = max_position_embeddings SCREAMING_SNAKE_CASE__ = type_vocab_size SCREAMING_SNAKE_CASE__ = type_sequence_label_size SCREAMING_SNAKE_CASE__ = initializer_range SCREAMING_SNAKE_CASE__ = num_labels SCREAMING_SNAKE_CASE__ = num_choices SCREAMING_SNAKE_CASE__ = scope SCREAMING_SNAKE_CASE__ = self.vocab_size - 1 def A_ ( self : List[Any] ): SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) SCREAMING_SNAKE_CASE__ = None if self.use_token_type_ids: SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) SCREAMING_SNAKE_CASE__ = None SCREAMING_SNAKE_CASE__ = None SCREAMING_SNAKE_CASE__ = None if self.use_labels: SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size] , self.num_choices ) SCREAMING_SNAKE_CASE__ = OpenAIGPTConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , ) SCREAMING_SNAKE_CASE__ = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 ) return ( config, input_ids, head_mask, token_type_ids, sequence_labels, token_labels, choice_labels, ) def A_ ( self : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[str] , *UpperCAmelCase_ : List[str] ): SCREAMING_SNAKE_CASE__ = OpenAIGPTModel(config=UpperCAmelCase_ ) model.to(UpperCAmelCase_ ) model.eval() SCREAMING_SNAKE_CASE__ = model(UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , head_mask=UpperCAmelCase_ ) SCREAMING_SNAKE_CASE__ = model(UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ ) SCREAMING_SNAKE_CASE__ = model(UpperCAmelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def A_ ( self : Union[str, Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Any , UpperCAmelCase_ : Tuple , *UpperCAmelCase_ : List[Any] ): SCREAMING_SNAKE_CASE__ = OpenAIGPTLMHeadModel(UpperCAmelCase_ ) model.to(UpperCAmelCase_ ) model.eval() SCREAMING_SNAKE_CASE__ = model(UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , labels=UpperCAmelCase_ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def A_ ( self : str , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , *UpperCAmelCase_ : Union[str, Any] ): SCREAMING_SNAKE_CASE__ = OpenAIGPTDoubleHeadsModel(UpperCAmelCase_ ) model.to(UpperCAmelCase_ ) model.eval() SCREAMING_SNAKE_CASE__ = model(UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , labels=UpperCAmelCase_ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def A_ ( self : int , UpperCAmelCase_ : Dict , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Any , *UpperCAmelCase_ : int ): SCREAMING_SNAKE_CASE__ = self.num_labels SCREAMING_SNAKE_CASE__ = OpenAIGPTForSequenceClassification(UpperCAmelCase_ ) model.to(UpperCAmelCase_ ) model.eval() SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) SCREAMING_SNAKE_CASE__ = model(UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , labels=UpperCAmelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def A_ ( self : Union[str, Any] ): SCREAMING_SNAKE_CASE__ = self.prepare_config_and_inputs() ( ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ) = config_and_inputs SCREAMING_SNAKE_CASE__ = { 'input_ids': input_ids, 'token_type_ids': token_type_ids, 'head_mask': head_mask, } return config, inputs_dict @require_torch class lowercase__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ): A__ : Union[str, Any] =( (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification) if is_torch_available() else () ) A__ : Any =( (OpenAIGPTLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly A__ : Dict =( { """feature-extraction""": OpenAIGPTModel, """text-classification""": OpenAIGPTForSequenceClassification, """text-generation""": OpenAIGPTLMHeadModel, """zero-shot""": OpenAIGPTForSequenceClassification, } if is_torch_available() else {} ) def A_ ( self : str , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[Any] ): if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests": # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers. # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a # tiny config could not be created. return True return False def A_ ( self : List[str] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Dict=False ): SCREAMING_SNAKE_CASE__ = super()._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ , return_labels=UpperCAmelCase_ ) if return_labels: if model_class.__name__ == "OpenAIGPTDoubleHeadsModel": SCREAMING_SNAKE_CASE__ = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=UpperCAmelCase_ , ) SCREAMING_SNAKE_CASE__ = inputs_dict['labels'] SCREAMING_SNAKE_CASE__ = inputs_dict['labels'] SCREAMING_SNAKE_CASE__ = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=UpperCAmelCase_ , ) SCREAMING_SNAKE_CASE__ = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase_ ) return inputs_dict def A_ ( self : Optional[int] ): SCREAMING_SNAKE_CASE__ = OpenAIGPTModelTester(self ) SCREAMING_SNAKE_CASE__ = ConfigTester(self , config_class=UpperCAmelCase_ , n_embd=37 ) def A_ ( self : Optional[int] ): self.config_tester.run_common_tests() def A_ ( self : Optional[int] ): SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_model(*UpperCAmelCase_ ) def A_ ( self : Union[str, Any] ): SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*UpperCAmelCase_ ) def A_ ( self : List[str] ): SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_double_lm_head_model(*UpperCAmelCase_ ) def A_ ( self : Optional[Any] ): SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*UpperCAmelCase_ ) @slow def A_ ( self : Optional[int] ): for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE__ = OpenAIGPTModel.from_pretrained(UpperCAmelCase_ ) self.assertIsNotNone(UpperCAmelCase_ ) @require_torch class lowercase__ ( unittest.TestCase ): @slow def A_ ( self : List[Any] ): SCREAMING_SNAKE_CASE__ = OpenAIGPTLMHeadModel.from_pretrained('openai-gpt' ) model.to(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE__ = torch.tensor([[481, 4735, 544]] , dtype=torch.long , device=UpperCAmelCase_ ) # the president is SCREAMING_SNAKE_CASE__ = [ 481, 4735, 544, 246, 963, 870, 762, 239, 244, 40477, 244, 249, 719, 881, 487, 544, 240, 244, 603, 481, ] # the president is a very good man. " \n " i\'m sure he is, " said the SCREAMING_SNAKE_CASE__ = model.generate(UpperCAmelCase_ , do_sample=UpperCAmelCase_ ) self.assertListEqual(output_ids[0].tolist() , UpperCAmelCase_ )
169
0
import requests from bsa import BeautifulSoup def _a ( SCREAMING_SNAKE_CASE_ : str = "https://www.worldometers.info/coronavirus" ): __lowerCAmelCase = BeautifulSoup(requests.get(SCREAMING_SNAKE_CASE_ ).text , "html.parser" ) __lowerCAmelCase = soup.findAll("h1" ) __lowerCAmelCase = soup.findAll("div" , {"class": "maincounter-number"} ) keys += soup.findAll("span" , {"class": "panel-title"} ) values += soup.findAll("div" , {"class": "number-table-main"} ) return {key.text.strip(): value.text.strip() for key, value in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )} if __name__ == "__main__": print("""\033[1m""" + """COVID-19 Status of the World""" + """\033[0m\n""") for key, value in world_covidaa_stats().items(): print(f'''{key}\n{value}\n''')
92
def lowerCamelCase__ ( ) -> int: return [ a * b * (1000 - a - b) for a in range(1 , 999 ) for b in range(snake_case_ , 999 ) if (a * a + b * b == (1000 - a - b) ** 2) ][0] if __name__ == "__main__": print(F'{solution() = }')
24
0
import logging import os from dataclasses import dataclass from typing import List, Optional, Union import tqdm from filelock import FileLock from transformers import ( BartTokenizer, BartTokenizerFast, DataProcessor, PreTrainedTokenizer, RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, is_tf_available, is_torch_available, ) __UpperCAmelCase = logging.getLogger(__name__) @dataclass(frozen=__UpperCamelCase ) class __a : __snake_case : str __snake_case : str __snake_case : Optional[str] = None __snake_case : Optional[str] = None __snake_case : Optional[str] = None @dataclass(frozen=__UpperCamelCase ) class __a : __snake_case : List[int] __snake_case : Optional[List[int]] = None __snake_case : Optional[List[int]] = None __snake_case : Optional[Union[int, float]] = None __snake_case : Optional[int] = None if is_torch_available(): import torch from torch.utils.data import Dataset class __a ( __UpperCamelCase ): __snake_case : List[InputFeatures] def __init__( self : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : PreTrainedTokenizer , UpperCAmelCase : str , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : Optional[Any]=False , UpperCAmelCase : bool = False , ): lowerCAmelCase_ : int = hans_processors[task]() lowerCAmelCase_ : int = os.path.join( UpperCAmelCase , """cached_{}_{}_{}_{}""".format( """dev""" if evaluate else """train""" , tokenizer.__class__.__name__ , str(UpperCAmelCase ) , UpperCAmelCase , ) , ) lowerCAmelCase_ : Any = processor.get_labels() if tokenizer.__class__ in ( RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, BartTokenizer, BartTokenizerFast, ): # HACK(label indices are swapped in RoBERTa pretrained model) lowerCAmelCase_ , lowerCAmelCase_ : int = label_list[2], label_list[1] lowerCAmelCase_ : List[str] = label_list # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. lowerCAmelCase_ : Tuple = cached_features_file + """.lock""" with FileLock(UpperCAmelCase ): if os.path.exists(UpperCAmelCase ) and not overwrite_cache: logger.info(F'Loading features from cached file {cached_features_file}' ) lowerCAmelCase_ : str = torch.load(UpperCAmelCase ) else: logger.info(F'Creating features from dataset file at {data_dir}' ) lowerCAmelCase_ : Optional[int] = ( processor.get_dev_examples(UpperCAmelCase ) if evaluate else processor.get_train_examples(UpperCAmelCase ) ) logger.info("""Training examples: %s""" , len(UpperCAmelCase ) ) lowerCAmelCase_ : Any = hans_convert_examples_to_features(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) logger.info("""Saving features into cached file %s""" , UpperCAmelCase ) torch.save(self.features , UpperCAmelCase ) def __len__( self : str ): return len(self.features ) def __getitem__( self : Union[str, Any] , UpperCAmelCase : Union[str, Any] ): return self.features[i] def A ( self : List[str] ): return self.label_list if is_tf_available(): import tensorflow as tf class __a : __snake_case : List[InputFeatures] def __init__( self : Any , UpperCAmelCase : str , UpperCAmelCase : PreTrainedTokenizer , UpperCAmelCase : str , UpperCAmelCase : Optional[int] = 1_28 , UpperCAmelCase : List[Any]=False , UpperCAmelCase : bool = False , ): lowerCAmelCase_ : Union[str, Any] = hans_processors[task]() lowerCAmelCase_ : str = processor.get_labels() if tokenizer.__class__ in ( RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, BartTokenizer, BartTokenizerFast, ): # HACK(label indices are swapped in RoBERTa pretrained model) lowerCAmelCase_ , lowerCAmelCase_ : Any = label_list[2], label_list[1] lowerCAmelCase_ : Dict = label_list lowerCAmelCase_ : Any = processor.get_dev_examples(UpperCAmelCase ) if evaluate else processor.get_train_examples(UpperCAmelCase ) lowerCAmelCase_ : Union[str, Any] = hans_convert_examples_to_features(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) def gen(): for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc="""convert examples to features""" ): if ex_index % 1_00_00 == 0: logger.info("""Writing example %d of %d""" % (ex_index, len(UpperCAmelCase )) ) yield ( { "example_id": 0, "input_ids": ex.input_ids, "attention_mask": ex.attention_mask, "token_type_ids": ex.token_type_ids, }, ex.label, ) lowerCAmelCase_ : List[Any] = tf.data.Dataset.from_generator( UpperCAmelCase , ( { """example_id""": tf.intaa, """input_ids""": tf.intaa, """attention_mask""": tf.intaa, """token_type_ids""": tf.intaa, }, tf.intaa, ) , ( { """example_id""": tf.TensorShape([] ), """input_ids""": tf.TensorShape([None, None] ), """attention_mask""": tf.TensorShape([None, None] ), """token_type_ids""": tf.TensorShape([None, None] ), }, tf.TensorShape([] ), ) , ) def A ( self : Dict ): return self.dataset def __len__( self : Optional[Any] ): return len(self.features ) def __getitem__( self : Dict , UpperCAmelCase : Union[str, Any] ): return self.features[i] def A ( self : Optional[int] ): return self.label_list class __a ( __UpperCamelCase ): def A ( self : Tuple , UpperCAmelCase : Union[str, Any] ): return self._create_examples(self._read_tsv(os.path.join(UpperCAmelCase , """heuristics_train_set.txt""" ) ) , """train""" ) def A ( self : Any , UpperCAmelCase : Dict ): return self._create_examples(self._read_tsv(os.path.join(UpperCAmelCase , """heuristics_evaluation_set.txt""" ) ) , """dev""" ) def A ( self : List[Any] ): return ["contradiction", "entailment", "neutral"] def A ( self : Tuple , UpperCAmelCase : Tuple , UpperCAmelCase : int ): lowerCAmelCase_ : int = [] for i, line in enumerate(UpperCAmelCase ): if i == 0: continue lowerCAmelCase_ : Optional[int] = """%s-%s""" % (set_type, line[0]) lowerCAmelCase_ : Any = line[5] lowerCAmelCase_ : Union[str, Any] = line[6] lowerCAmelCase_ : Dict = line[7][2:] if line[7].startswith("""ex""" ) else line[7] lowerCAmelCase_ : List[Any] = line[0] examples.append(InputExample(guid=UpperCAmelCase , text_a=UpperCAmelCase , text_b=UpperCAmelCase , label=UpperCAmelCase , pairID=UpperCAmelCase ) ) return examples def __UpperCamelCase ( lowercase__ : List[InputExample] , lowercase__ : List[str] , lowercase__ : int , lowercase__ : PreTrainedTokenizer , ) -> Union[str, Any]: '''simple docstring''' lowerCAmelCase_ : Any = {label: i for i, label in enumerate(lowercase__ )} lowerCAmelCase_ : str = [] for ex_index, example in tqdm.tqdm(enumerate(lowercase__ ) , desc="""convert examples to features""" ): if ex_index % 10000 == 0: logger.info("""Writing example %d""" % (ex_index) ) lowerCAmelCase_ : List[Any] = tokenizer( example.text_a , example.text_b , add_special_tokens=lowercase__ , max_length=lowercase__ , padding="""max_length""" , truncation=lowercase__ , return_overflowing_tokens=lowercase__ , ) lowerCAmelCase_ : Union[str, Any] = label_map[example.label] if example.label in label_map else 0 lowerCAmelCase_ : Optional[Any] = int(example.pairID ) features.append(InputFeatures(**lowercase__ , label=lowercase__ , pairID=lowercase__ ) ) for i, example in enumerate(examples[:5] ): logger.info("""*** Example ***""" ) logger.info(f'guid: {example}' ) logger.info(f'features: {features[i]}' ) return features __UpperCAmelCase = { 'hans': 3, } __UpperCAmelCase = { 'hans': HansProcessor, }
28
from typing import List, Optional, Union from ...image_utils import ImageInput from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class __a ( __UpperCamelCase ): __snake_case : Any = ["""image_processor""", """tokenizer"""] __snake_case : Tuple = """BlipImageProcessor""" __snake_case : int = ("""BertTokenizer""", """BertTokenizerFast""") def __init__( self : int , UpperCAmelCase : List[str] , UpperCAmelCase : List[Any] ): lowerCAmelCase_ : str = False super().__init__(UpperCAmelCase , UpperCAmelCase ) lowerCAmelCase_ : Tuple = self.image_processor def __call__( self : Optional[int] , UpperCAmelCase : ImageInput = None , UpperCAmelCase : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , UpperCAmelCase : bool = True , UpperCAmelCase : Union[bool, str, PaddingStrategy] = False , UpperCAmelCase : Union[bool, str, TruncationStrategy] = None , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : int = 0 , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : Optional[bool] = None , UpperCAmelCase : bool = False , UpperCAmelCase : bool = False , UpperCAmelCase : bool = False , UpperCAmelCase : bool = False , UpperCAmelCase : bool = False , UpperCAmelCase : bool = True , UpperCAmelCase : Optional[Union[str, TensorType]] = None , **UpperCAmelCase : Tuple , ): if images is None and text is None: raise ValueError("""You have to specify either images or text.""" ) # Get only text if images is None: lowerCAmelCase_ : str = self.tokenizer lowerCAmelCase_ : List[Any] = self.tokenizer( text=UpperCAmelCase , add_special_tokens=UpperCAmelCase , padding=UpperCAmelCase , truncation=UpperCAmelCase , max_length=UpperCAmelCase , stride=UpperCAmelCase , pad_to_multiple_of=UpperCAmelCase , return_attention_mask=UpperCAmelCase , return_overflowing_tokens=UpperCAmelCase , return_special_tokens_mask=UpperCAmelCase , return_offsets_mapping=UpperCAmelCase , return_token_type_ids=UpperCAmelCase , return_length=UpperCAmelCase , verbose=UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase , ) return text_encoding # add pixel_values lowerCAmelCase_ : Union[str, Any] = self.image_processor(UpperCAmelCase , return_tensors=UpperCAmelCase ) if text is not None: lowerCAmelCase_ : Optional[Any] = self.tokenizer( text=UpperCAmelCase , add_special_tokens=UpperCAmelCase , padding=UpperCAmelCase , truncation=UpperCAmelCase , max_length=UpperCAmelCase , stride=UpperCAmelCase , pad_to_multiple_of=UpperCAmelCase , return_attention_mask=UpperCAmelCase , return_overflowing_tokens=UpperCAmelCase , return_special_tokens_mask=UpperCAmelCase , return_offsets_mapping=UpperCAmelCase , return_token_type_ids=UpperCAmelCase , return_length=UpperCAmelCase , verbose=UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase , ) else: lowerCAmelCase_ : int = None if text_encoding is not None: encoding_image_processor.update(UpperCAmelCase ) return encoding_image_processor def A ( self : Optional[Any] , *UpperCAmelCase : Optional[Any] , **UpperCAmelCase : int ): return self.tokenizer.batch_decode(*UpperCAmelCase , **UpperCAmelCase ) def A ( self : List[Any] , *UpperCAmelCase : Optional[int] , **UpperCAmelCase : Optional[Any] ): return self.tokenizer.decode(*UpperCAmelCase , **UpperCAmelCase ) @property def A ( self : int ): lowerCAmelCase_ : int = self.tokenizer.model_input_names lowerCAmelCase_ : Optional[int] = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
28
1
import argparse import pytorch_lightning as pl import torch from torch import nn from transformers import LongformerForQuestionAnswering, LongformerModel class a ( pl.LightningModule ): """simple docstring""" def __init__( self , lowerCAmelCase_ ) -> Optional[int]: super().__init__() _A = model _A = 2 _A = nn.Linear(self.model.config.hidden_size , self.num_labels ) def UpperCAmelCase ( self ) -> Any: pass def snake_case ( snake_case__ :str , snake_case__ :str , snake_case__ :str) -> List[Any]: # load longformer model from model identifier _A = LongformerModel.from_pretrained(snake_case__) _A = LightningModel(snake_case__) _A = torch.load(snake_case__ , map_location=torch.device("""cpu""")) lightning_model.load_state_dict(ckpt["""state_dict"""]) # init longformer question answering model _A = LongformerForQuestionAnswering.from_pretrained(snake_case__) # transfer weights longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict()) longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict()) longformer_for_qa.eval() # save model longformer_for_qa.save_pretrained(snake_case__) print(F'''Conversion successful. Model saved under {pytorch_dump_folder_path}''') if __name__ == "__main__": _SCREAMING_SNAKE_CASE = argparse.ArgumentParser() # Required parameters parser.add_argument( '--longformer_model', default=None, type=str, required=True, help='model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.', ) parser.add_argument( '--longformer_question_answering_ckpt_path', default=None, type=str, required=True, help='Path the official PyTorch Lightning Checkpoint.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) _SCREAMING_SNAKE_CASE = parser.parse_args() convert_longformer_qa_checkpoint_to_pytorch( args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path )
180
from unittest.mock import patch import pyspark from datasets.packaged_modules.spark.spark import ( Spark, SparkExamplesIterable, _generate_iterable_examples, ) from ..utils import ( require_dill_gt_0_3_2, require_not_windows, ) def snake_case ( snake_case__ :Union[str, Any] , snake_case__ :Dict) -> Any: _A = [] for part_id in partition_order: _A = df.where(F'''SPARK_PARTITION_ID() = {part_id}''').collect() for row_idx, row in enumerate(snake_case__): expected_row_ids_and_row_dicts.append((F'''{part_id}_{row_idx}''', row.asDict())) return expected_row_ids_and_row_dicts @require_not_windows @require_dill_gt_0_3_2 def snake_case ( ) -> Optional[Any]: _A = pyspark.sql.SparkSession.builder.master("""local[*]""").appName("""pyspark""").getOrCreate() _A = spark.range(100).repartition(1) _A = Spark(snake_case__) # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means # that each partition can hold 2 rows. spark_builder._repartition_df_if_needed(max_shard_size=16) # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions. assert spark_builder.df.rdd.getNumPartitions() == 50 @require_not_windows @require_dill_gt_0_3_2 def snake_case ( ) -> Union[str, Any]: _A = pyspark.sql.SparkSession.builder.master("""local[*]""").appName("""pyspark""").getOrCreate() _A = spark.range(10).repartition(2) _A = [1, 0] _A = _generate_iterable_examples(snake_case__ , snake_case__) # Reverse the partitions. _A = _get_expected_row_ids_and_row_dicts_for_partition_order(snake_case__ , snake_case__) for i, (row_id, row_dict) in enumerate(generate_fn()): _A , _A = expected_row_ids_and_row_dicts[i] assert row_id == expected_row_id assert row_dict == expected_row_dict @require_not_windows @require_dill_gt_0_3_2 def snake_case ( ) -> int: _A = pyspark.sql.SparkSession.builder.master("""local[*]""").appName("""pyspark""").getOrCreate() _A = spark.range(10).repartition(1) _A = SparkExamplesIterable(snake_case__) assert it.n_shards == 1 for i, (row_id, row_dict) in enumerate(snake_case__): assert row_id == F'''0_{i}''' assert row_dict == {"id": i} @require_not_windows @require_dill_gt_0_3_2 def snake_case ( ) -> Union[str, Any]: _A = pyspark.sql.SparkSession.builder.master("""local[*]""").appName("""pyspark""").getOrCreate() _A = spark.range(30).repartition(3) # Mock the generator so that shuffle reverses the partition indices. with patch("""numpy.random.Generator""") as generator_mock: _A = lambda snake_case__: x.reverse() _A = _get_expected_row_ids_and_row_dicts_for_partition_order(snake_case__ , [2, 1, 0]) _A = SparkExamplesIterable(snake_case__).shuffle_data_sources(snake_case__) assert shuffled_it.n_shards == 3 for i, (row_id, row_dict) in enumerate(snake_case__): _A , _A = expected_row_ids_and_row_dicts[i] assert row_id == expected_row_id assert row_dict == expected_row_dict @require_not_windows @require_dill_gt_0_3_2 def snake_case ( ) -> List[str]: _A = pyspark.sql.SparkSession.builder.master("""local[*]""").appName("""pyspark""").getOrCreate() _A = spark.range(20).repartition(4) # Partitions 0 and 2 _A = SparkExamplesIterable(snake_case__).shard_data_sources(worker_id=0 , num_workers=2) assert shard_it_a.n_shards == 2 _A = _get_expected_row_ids_and_row_dicts_for_partition_order(snake_case__ , [0, 2]) for i, (row_id, row_dict) in enumerate(snake_case__): _A , _A = expected_row_ids_and_row_dicts_a[i] assert row_id == expected_row_id assert row_dict == expected_row_dict # Partitions 1 and 3 _A = SparkExamplesIterable(snake_case__).shard_data_sources(worker_id=1 , num_workers=2) assert shard_it_a.n_shards == 2 _A = _get_expected_row_ids_and_row_dicts_for_partition_order(snake_case__ , [1, 3]) for i, (row_id, row_dict) in enumerate(snake_case__): _A , _A = expected_row_ids_and_row_dicts_a[i] assert row_id == expected_row_id assert row_dict == expected_row_dict @require_not_windows @require_dill_gt_0_3_2 def snake_case ( ) -> Tuple: _A = pyspark.sql.SparkSession.builder.master("""local[*]""").appName("""pyspark""").getOrCreate() _A = spark.range(100).repartition(1) _A = Spark(snake_case__) # Choose a small max_shard_size for maximum partitioning. spark_builder._repartition_df_if_needed(max_shard_size=1) # The new number of partitions should not be greater than the number of rows. assert spark_builder.df.rdd.getNumPartitions() == 100
180
1
from ..utils import is_flax_available, is_torch_available if is_torch_available(): from .autoencoder_kl import AutoencoderKL from .controlnet import ControlNetModel from .dual_transformer_ad import DualTransformeraDModel from .modeling_utils import ModelMixin from .prior_transformer import PriorTransformer from .ta_film_transformer import TaFilmDecoder from .transformer_ad import TransformeraDModel from .unet_ad import UNetaDModel from .unet_ad import UNetaDModel from .unet_ad_condition import UNetaDConditionModel from .unet_ad_condition import UNetaDConditionModel from .vq_model import VQModel if is_flax_available(): from .controlnet_flax import FlaxControlNetModel from .unet_ad_condition_flax import FlaxUNetaDConditionModel from .vae_flax import FlaxAutoencoderKL
121
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCAmelCase__ = { '''configuration_deberta''': ['''DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DebertaConfig''', '''DebertaOnnxConfig'''], '''tokenization_deberta''': ['''DebertaTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = ['''DebertaTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ '''DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''DebertaForMaskedLM''', '''DebertaForQuestionAnswering''', '''DebertaForSequenceClassification''', '''DebertaForTokenClassification''', '''DebertaModel''', '''DebertaPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ '''TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFDebertaForMaskedLM''', '''TFDebertaForQuestionAnswering''', '''TFDebertaForSequenceClassification''', '''TFDebertaForTokenClassification''', '''TFDebertaModel''', '''TFDebertaPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig from .tokenization_deberta import DebertaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_deberta_fast import DebertaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_deberta import ( DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, DebertaForMaskedLM, DebertaForQuestionAnswering, DebertaForSequenceClassification, DebertaForTokenClassification, DebertaModel, DebertaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_deberta import ( TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, TFDebertaForMaskedLM, TFDebertaForQuestionAnswering, TFDebertaForSequenceClassification, TFDebertaForTokenClassification, TFDebertaModel, TFDebertaPreTrainedModel, ) else: import sys lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
121
1
from __future__ import annotations import numpy as np def UpperCAmelCase_( a__ ): """simple docstring""" SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = np.shape(a__ ) if rows != columns: SCREAMING_SNAKE_CASE : int = ( '''\'table\' has to be of square shaped array but got a ''' F"""{rows}x{columns} array:\n{table}""" ) raise ValueError(a__ ) SCREAMING_SNAKE_CASE : Optional[Any] = np.zeros((rows, columns) ) SCREAMING_SNAKE_CASE : int = np.zeros((rows, columns) ) for i in range(a__ ): for j in range(a__ ): SCREAMING_SNAKE_CASE : List[Any] = sum(lower[i][k] * upper[k][j] for k in range(a__ ) ) if upper[j][j] == 0: raise ArithmeticError('''No LU decomposition exists''' ) SCREAMING_SNAKE_CASE : Union[str, Any] = (table[i][j] - total) / upper[j][j] SCREAMING_SNAKE_CASE : Optional[Any] = 1 for j in range(a__ , a__ ): SCREAMING_SNAKE_CASE : List[Any] = sum(lower[i][k] * upper[k][j] for k in range(a__ ) ) SCREAMING_SNAKE_CASE : List[Any] = table[i][j] - total return lower, upper if __name__ == "__main__": import doctest doctest.testmod()
313
import csv import tweepy # Twitter API credentials a__ : Union[str, Any] = '''''' a__ : List[str] = '''''' a__ : Any = '''''' a__ : List[str] = '''''' def UpperCAmelCase_( a__ ): """simple docstring""" SCREAMING_SNAKE_CASE : Dict = tweepy.OAuthHandler(a__ , a__ ) auth.set_access_token(a__ , a__ ) SCREAMING_SNAKE_CASE : List[str] = tweepy.API(a__ ) # initialize a list to hold all the tweepy Tweets SCREAMING_SNAKE_CASE : Any = [] # make initial request for most recent tweets (200 is the maximum allowed count) SCREAMING_SNAKE_CASE : List[Any] = api.user_timeline(screen_name=a__ , count=200 ) # save most recent tweets alltweets.extend(a__ ) # save the id of the oldest tweet less one SCREAMING_SNAKE_CASE : Tuple = alltweets[-1].id - 1 # keep grabbing tweets until there are no tweets left to grab while len(a__ ) > 0: print(F"""getting tweets before {oldest}""" ) # all subsequent requests use the max_id param to prevent duplicates SCREAMING_SNAKE_CASE : Any = api.user_timeline( screen_name=a__ , count=200 , max_id=a__ ) # save most recent tweets alltweets.extend(a__ ) # update the id of the oldest tweet less one SCREAMING_SNAKE_CASE : Dict = alltweets[-1].id - 1 print(F"""...{len(a__ )} tweets downloaded so far""" ) # transform the tweepy tweets into a 2D array that will populate the csv SCREAMING_SNAKE_CASE : Optional[Any] = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets] # write the csv with open(F"""new_{screen_name}_tweets.csv""" , '''w''' ) as f: SCREAMING_SNAKE_CASE : List[Any] = csv.writer(a__ ) writer.writerow(['''id''', '''created_at''', '''text'''] ) writer.writerows(a__ ) if __name__ == "__main__": # pass in the username of the account you want to download get_all_tweets('''FirePing32''')
313
1
import gc import random import unittest import numpy as np import torch from diffusers import DDIMScheduler, KandinskyVaaPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel from diffusers.utils import floats_tensor, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class SCREAMING_SNAKE_CASE_ ( snake_case_ , unittest.TestCase ): __magic_name__: str = KandinskyVaaPipeline __magic_name__: str = [ "image_embeds", "negative_image_embeds", ] __magic_name__: str = ["image_embeds", "negative_image_embeds"] __magic_name__: int = [ "generator", "height", "width", "latents", "guidance_scale", "num_inference_steps", "return_dict", "guidance_scale", "num_images_per_prompt", "output_type", "return_dict", ] __magic_name__: Dict = False @property def UpperCAmelCase_ ( self : Tuple ) -> int: """simple docstring""" return 32 @property def UpperCAmelCase_ ( self : Tuple ) -> Any: """simple docstring""" return 32 @property def UpperCAmelCase_ ( self : List[str] ) -> Tuple: """simple docstring""" return self.time_input_dim @property def UpperCAmelCase_ ( self : Optional[Any] ) -> str: """simple docstring""" return self.time_input_dim * 4 @property def UpperCAmelCase_ ( self : List[Any] ) -> Any: """simple docstring""" return 100 @property def UpperCAmelCase_ ( self : List[str] ) -> Union[str, Any]: """simple docstring""" torch.manual_seed(0 ) snake_case_ : Dict = { 'in_channels': 4, # Out channels is double in channels because predicts mean and variance 'out_channels': 8, 'addition_embed_type': 'image', 'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'), 'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'), 'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn', 'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2), 'layers_per_block': 1, 'encoder_hid_dim': self.text_embedder_hidden_size, 'encoder_hid_dim_type': 'image_proj', 'cross_attention_dim': self.cross_attention_dim, 'attention_head_dim': 4, 'resnet_time_scale_shift': 'scale_shift', 'class_embed_type': None, } snake_case_ : Dict = UNetaDConditionModel(**_A ) return model @property def UpperCAmelCase_ ( self : List[Any] ) -> List[str]: """simple docstring""" return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def UpperCAmelCase_ ( self : str ) -> int: """simple docstring""" torch.manual_seed(0 ) snake_case_ : List[str] = VQModel(**self.dummy_movq_kwargs ) return model def UpperCAmelCase_ ( self : Dict ) -> Dict: """simple docstring""" snake_case_ : Any = self.dummy_unet snake_case_ : List[Any] = self.dummy_movq snake_case_ : Any = DDIMScheduler( num_train_timesteps=1000 , beta_schedule='linear' , beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , clip_sample=_A , set_alpha_to_one=_A , steps_offset=1 , prediction_type='epsilon' , thresholding=_A , ) snake_case_ : Dict = { 'unet': unet, 'scheduler': scheduler, 'movq': movq, } return components def UpperCAmelCase_ ( self : List[Any] , _A : Tuple , _A : List[str]=0 ) -> int: """simple docstring""" snake_case_ : Union[str, Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(_A ) ).to(_A ) snake_case_ : Tuple = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to( _A ) if str(_A ).startswith('mps' ): snake_case_ : Union[str, Any] = torch.manual_seed(_A ) else: snake_case_ : Optional[Any] = torch.Generator(device=_A ).manual_seed(_A ) snake_case_ : List[Any] = { 'image_embeds': image_embeds, 'negative_image_embeds': negative_image_embeds, 'generator': generator, 'height': 64, 'width': 64, 'guidance_scale': 4.0, 'num_inference_steps': 2, 'output_type': 'np', } return inputs def UpperCAmelCase_ ( self : Dict ) -> str: """simple docstring""" snake_case_ : List[Any] = 'cpu' snake_case_ : List[str] = self.get_dummy_components() snake_case_ : Union[str, Any] = self.pipeline_class(**_A ) snake_case_ : Dict = pipe.to(_A ) pipe.set_progress_bar_config(disable=_A ) snake_case_ : List[str] = pipe(**self.get_dummy_inputs(_A ) ) snake_case_ : Any = output.images snake_case_ : int = pipe( **self.get_dummy_inputs(_A ) , return_dict=_A , )[0] snake_case_ : List[Any] = image[0, -3:, -3:, -1] snake_case_ : Dict = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) snake_case_ : List[str] = np.array( [0.6_2_3_7_9_7_6, 1.0, 0.3_6_4_4_1_3_3_2, 1.0, 0.7_0_6_3_9_6_3_4, 0.2_9_8_7_7_1_8_6, 0.8_5_6_5_2_1_2_5, 0.5_2_1_6_8_4_3, 0.5_4_4_5_4_0_4_6] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 ), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}""" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 ), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}""" @slow @require_torch_gpu class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): def UpperCAmelCase_ ( self : Tuple ) -> Dict: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCAmelCase_ ( self : Any ) -> Optional[int]: """simple docstring""" snake_case_ : List[str] = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy' ) snake_case_ : int = KandinskyVaaPriorPipeline.from_pretrained( 'kandinsky-community/kandinsky-2-2-prior' , torch_dtype=torch.floataa ) pipe_prior.to(_A ) snake_case_ : int = KandinskyVaaPipeline.from_pretrained( 'kandinsky-community/kandinsky-2-2-decoder' , torch_dtype=torch.floataa ) snake_case_ : Tuple = pipeline.to(_A ) pipeline.set_progress_bar_config(disable=_A ) snake_case_ : int = 'red cat, 4k photo' snake_case_ : List[Any] = torch.Generator(device='cuda' ).manual_seed(0 ) snake_case_ : Tuple = pipe_prior( _A , generator=_A , num_inference_steps=5 , negative_prompt='' , ).to_tuple() snake_case_ : str = torch.Generator(device='cuda' ).manual_seed(0 ) snake_case_ : Dict = pipeline( image_embeds=_A , negative_image_embeds=_A , generator=_A , num_inference_steps=100 , output_type='np' , ) snake_case_ : Tuple = output.images[0] assert image.shape == (512, 512, 3) assert_mean_pixel_difference(_A , _A )
358
def SCREAMING_SNAKE_CASE__ ( __a , __a ): snake_case_ : Optional[int] = int(__a ) # Initialize Result snake_case_ : Tuple = [] # Traverse through all denomination for denomination in reversed(__a ): # Find denominations while int(__a ) >= int(__a ): total_value -= int(__a ) answer.append(__a ) # Append the "answers" array return answer # Driver Code if __name__ == "__main__": _SCREAMING_SNAKE_CASE = [] _SCREAMING_SNAKE_CASE = """0""" if ( input("""Do you want to enter your denominations ? (yY/n): """).strip().lower() == "y" ): _SCREAMING_SNAKE_CASE = int(input("""Enter the number of denominations you want to add: """).strip()) for i in range(0, n): denominations.append(int(input(F'''Denomination {i}: ''').strip())) _SCREAMING_SNAKE_CASE = input("""Enter the change you want to make in Indian Currency: """).strip() else: # All denominations of Indian Currency if user does not enter _SCREAMING_SNAKE_CASE = [1, 2, 5, 10, 20, 50, 1_00, 5_00, 20_00] _SCREAMING_SNAKE_CASE = input("""Enter the change you want to make: """).strip() if int(value) == 0 or int(value) < 0: print("""The total value cannot be zero or negative.""") else: print(F'''Following is minimal change for {value}: ''') _SCREAMING_SNAKE_CASE = find_minimum_change(denominations, value) # Print result for i in range(len(answer)): print(answer[i], end=""" """)
88
0
import pytest import requests from datasets.utils.file_utils import http_head from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline @pytest.mark.integration def _A ( ): """simple docstring""" with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ): with pytest.raises(_SCREAMING_SNAKE_CASE ): requests.request("GET" , "https://huggingface.co" ) with pytest.raises(requests.exceptions.ConnectTimeout ): requests.request("GET" , "https://huggingface.co" , timeout=1.0 ) @pytest.mark.integration def _A ( ): """simple docstring""" with offline(OfflineSimulationMode.CONNECTION_FAILS ): with pytest.raises(requests.exceptions.ConnectionError ): requests.request("GET" , "https://huggingface.co" ) def _A ( ): """simple docstring""" with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ): with pytest.raises(_SCREAMING_SNAKE_CASE ): http_head("https://huggingface.co" )
95
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileViTConfig, MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__) def __lowercase ( _SCREAMING_SNAKE_CASE ) -> Optional[int]: '''simple docstring''' SCREAMING_SNAKE_CASE = MobileViTConfig() # size of the architecture if "mobilevit_s" in mobilevit_name: SCREAMING_SNAKE_CASE = [1_44, 1_92, 2_40] SCREAMING_SNAKE_CASE = [16, 32, 64, 96, 1_28, 1_60, 6_40] elif "mobilevit_xs" in mobilevit_name: SCREAMING_SNAKE_CASE = [96, 1_20, 1_44] SCREAMING_SNAKE_CASE = [16, 32, 48, 64, 80, 96, 3_84] elif "mobilevit_xxs" in mobilevit_name: SCREAMING_SNAKE_CASE = [64, 80, 96] SCREAMING_SNAKE_CASE = [16, 16, 24, 48, 64, 80, 3_20] SCREAMING_SNAKE_CASE = 0.05 SCREAMING_SNAKE_CASE = 2.0 if mobilevit_name.startswith("""deeplabv3_""" ): SCREAMING_SNAKE_CASE = 5_12 SCREAMING_SNAKE_CASE = 16 SCREAMING_SNAKE_CASE = 21 SCREAMING_SNAKE_CASE = """pascal-voc-id2label.json""" else: SCREAMING_SNAKE_CASE = 10_00 SCREAMING_SNAKE_CASE = """imagenet-1k-id2label.json""" SCREAMING_SNAKE_CASE = """huggingface/label-files""" SCREAMING_SNAKE_CASE = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type="""dataset""" ) , """r""" ) ) SCREAMING_SNAKE_CASE = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} SCREAMING_SNAKE_CASE = idalabel SCREAMING_SNAKE_CASE = {v: k for k, v in idalabel.items()} return config def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> str: '''simple docstring''' for i in range(1 , 6 ): if F"""layer_{i}.""" in name: SCREAMING_SNAKE_CASE = name.replace(F"""layer_{i}.""" , F"""encoder.layer.{i - 1}.""" ) if "conv_1." in name: SCREAMING_SNAKE_CASE = name.replace("""conv_1.""" , """conv_stem.""" ) if ".block." in name: SCREAMING_SNAKE_CASE = name.replace(""".block.""" , """.""" ) if "exp_1x1" in name: SCREAMING_SNAKE_CASE = name.replace("""exp_1x1""" , """expand_1x1""" ) if "red_1x1" in name: SCREAMING_SNAKE_CASE = name.replace("""red_1x1""" , """reduce_1x1""" ) if ".local_rep.conv_3x3." in name: SCREAMING_SNAKE_CASE = name.replace(""".local_rep.conv_3x3.""" , """.conv_kxk.""" ) if ".local_rep.conv_1x1." in name: SCREAMING_SNAKE_CASE = name.replace(""".local_rep.conv_1x1.""" , """.conv_1x1.""" ) if ".norm." in name: SCREAMING_SNAKE_CASE = name.replace(""".norm.""" , """.normalization.""" ) if ".conv." in name: SCREAMING_SNAKE_CASE = name.replace(""".conv.""" , """.convolution.""" ) if ".conv_proj." in name: SCREAMING_SNAKE_CASE = name.replace(""".conv_proj.""" , """.conv_projection.""" ) for i in range(0 , 2 ): for j in range(0 , 4 ): if F""".{i}.{j}.""" in name: SCREAMING_SNAKE_CASE = name.replace(F""".{i}.{j}.""" , F""".{i}.layer.{j}.""" ) for i in range(2 , 6 ): for j in range(0 , 4 ): if F""".{i}.{j}.""" in name: SCREAMING_SNAKE_CASE = name.replace(F""".{i}.{j}.""" , F""".{i}.""" ) if "expand_1x1" in name: SCREAMING_SNAKE_CASE = name.replace("""expand_1x1""" , """downsampling_layer.expand_1x1""" ) if "conv_3x3" in name: SCREAMING_SNAKE_CASE = name.replace("""conv_3x3""" , """downsampling_layer.conv_3x3""" ) if "reduce_1x1" in name: SCREAMING_SNAKE_CASE = name.replace("""reduce_1x1""" , """downsampling_layer.reduce_1x1""" ) for i in range(2 , 5 ): if F""".global_rep.{i}.weight""" in name: SCREAMING_SNAKE_CASE = name.replace(F""".global_rep.{i}.weight""" , """.layernorm.weight""" ) if F""".global_rep.{i}.bias""" in name: SCREAMING_SNAKE_CASE = name.replace(F""".global_rep.{i}.bias""" , """.layernorm.bias""" ) if ".global_rep." in name: SCREAMING_SNAKE_CASE = name.replace(""".global_rep.""" , """.transformer.""" ) if ".pre_norm_mha.0." in name: SCREAMING_SNAKE_CASE = name.replace(""".pre_norm_mha.0.""" , """.layernorm_before.""" ) if ".pre_norm_mha.1.out_proj." in name: SCREAMING_SNAKE_CASE = name.replace(""".pre_norm_mha.1.out_proj.""" , """.attention.output.dense.""" ) if ".pre_norm_ffn.0." in name: SCREAMING_SNAKE_CASE = name.replace(""".pre_norm_ffn.0.""" , """.layernorm_after.""" ) if ".pre_norm_ffn.1." in name: SCREAMING_SNAKE_CASE = name.replace(""".pre_norm_ffn.1.""" , """.intermediate.dense.""" ) if ".pre_norm_ffn.4." in name: SCREAMING_SNAKE_CASE = name.replace(""".pre_norm_ffn.4.""" , """.output.dense.""" ) if ".transformer." in name: SCREAMING_SNAKE_CASE = name.replace(""".transformer.""" , """.transformer.layer.""" ) if ".aspp_layer." in name: SCREAMING_SNAKE_CASE = name.replace(""".aspp_layer.""" , """.""" ) if ".aspp_pool." in name: SCREAMING_SNAKE_CASE = name.replace(""".aspp_pool.""" , """.""" ) if "seg_head." in name: SCREAMING_SNAKE_CASE = name.replace("""seg_head.""" , """segmentation_head.""" ) if "segmentation_head.classifier.classifier." in name: SCREAMING_SNAKE_CASE = name.replace("""segmentation_head.classifier.classifier.""" , """segmentation_head.classifier.""" ) if "classifier.fc." in name: SCREAMING_SNAKE_CASE = name.replace("""classifier.fc.""" , """classifier.""" ) elif (not base_model) and ("segmentation_head." not in name): SCREAMING_SNAKE_CASE = """mobilevit.""" + name return name def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> Dict: '''simple docstring''' if base_model: SCREAMING_SNAKE_CASE = """""" else: SCREAMING_SNAKE_CASE = """mobilevit.""" for key in orig_state_dict.copy().keys(): SCREAMING_SNAKE_CASE = orig_state_dict.pop(_SCREAMING_SNAKE_CASE ) if key[:8] == "encoder.": SCREAMING_SNAKE_CASE = key[8:] if "qkv" in key: SCREAMING_SNAKE_CASE = key.split(""".""" ) SCREAMING_SNAKE_CASE = int(key_split[0][6:] ) - 1 SCREAMING_SNAKE_CASE = int(key_split[3] ) SCREAMING_SNAKE_CASE = model.get_submodule(F"""{model_prefix}encoder.layer.{layer_num}""" ) SCREAMING_SNAKE_CASE = layer.transformer.layer[transformer_num].attention.attention.all_head_size SCREAMING_SNAKE_CASE = ( F"""{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention.""" ) if "weight" in key: SCREAMING_SNAKE_CASE = val[:dim, :] SCREAMING_SNAKE_CASE = val[dim : dim * 2, :] SCREAMING_SNAKE_CASE = val[-dim:, :] else: SCREAMING_SNAKE_CASE = val[:dim] SCREAMING_SNAKE_CASE = val[dim : dim * 2] SCREAMING_SNAKE_CASE = val[-dim:] else: SCREAMING_SNAKE_CASE = val return orig_state_dict def __lowercase ( ) -> str: '''simple docstring''' SCREAMING_SNAKE_CASE = """http://images.cocodataset.org/val2017/000000039769.jpg""" SCREAMING_SNAKE_CASE = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw ) return im @torch.no_grad() def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> str: '''simple docstring''' SCREAMING_SNAKE_CASE = get_mobilevit_config(_SCREAMING_SNAKE_CASE ) # load original state_dict SCREAMING_SNAKE_CASE = torch.load(_SCREAMING_SNAKE_CASE , map_location="""cpu""" ) # load 🤗 model if mobilevit_name.startswith("""deeplabv3_""" ): SCREAMING_SNAKE_CASE = MobileViTForSemanticSegmentation(_SCREAMING_SNAKE_CASE ).eval() else: SCREAMING_SNAKE_CASE = MobileViTForImageClassification(_SCREAMING_SNAKE_CASE ).eval() SCREAMING_SNAKE_CASE = convert_state_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) model.load_state_dict(_SCREAMING_SNAKE_CASE ) # Check outputs on an image, prepared by MobileViTImageProcessor SCREAMING_SNAKE_CASE = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 ) SCREAMING_SNAKE_CASE = image_processor(images=prepare_img() , return_tensors="""pt""" ) SCREAMING_SNAKE_CASE = model(**_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE = outputs.logits if mobilevit_name.startswith("""deeplabv3_""" ): assert logits.shape == (1, 21, 32, 32) if mobilevit_name == "deeplabv3_mobilevit_s": SCREAMING_SNAKE_CASE = torch.tensor( [ [[6.2_065, 6.1_292, 6.2_070], [6.1_079, 6.1_254, 6.1_747], [6.0_042, 6.1_071, 6.1_034]], [[-6.9_253, -6.8_653, -7.0_398], [-7.3_218, -7.3_983, -7.3_670], [-7.1_961, -7.2_482, -7.1_569]], [[-4.4_723, -4.4_348, -4.3_769], [-5.3_629, -5.4_632, -5.4_598], [-5.1_587, -5.3_402, -5.5_059]], ] ) elif mobilevit_name == "deeplabv3_mobilevit_xs": SCREAMING_SNAKE_CASE = torch.tensor( [ [[5.4_449, 5.5_733, 5.6_314], [5.1_815, 5.3_930, 5.5_963], [5.1_656, 5.4_333, 5.4_853]], [[-9.4_423, -9.7_766, -9.6_714], [-9.1_581, -9.5_720, -9.5_519], [-9.1_006, -9.6_458, -9.5_703]], [[-7.7_721, -7.3_716, -7.1_583], [-8.4_599, -8.0_624, -7.7_944], [-8.4_172, -7.8_366, -7.5_025]], ] ) elif mobilevit_name == "deeplabv3_mobilevit_xxs": SCREAMING_SNAKE_CASE = torch.tensor( [ [[6.9_811, 6.9_743, 7.3_123], [7.1_777, 7.1_931, 7.3_938], [7.5_633, 7.8_050, 7.8_901]], [[-10.5_536, -10.2_332, -10.2_924], [-10.2_336, -9.8_624, -9.5_964], [-10.8_840, -10.8_158, -10.6_659]], [[-3.4_938, -3.0_631, -2.8_620], [-3.4_205, -2.8_135, -2.6_875], [-3.4_179, -2.7_945, -2.8_750]], ] ) else: raise ValueError(F"""Unknown mobilevit_name: {mobilevit_name}""" ) assert torch.allclose(logits[0, :3, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ) else: assert logits.shape == (1, 10_00) if mobilevit_name == "mobilevit_s": SCREAMING_SNAKE_CASE = torch.tensor([-0.9_866, 0.2_392, -1.1_241] ) elif mobilevit_name == "mobilevit_xs": SCREAMING_SNAKE_CASE = torch.tensor([-2.4_761, -0.9_399, -1.9_587] ) elif mobilevit_name == "mobilevit_xxs": SCREAMING_SNAKE_CASE = torch.tensor([-1.9_364, -1.2_327, -0.4_653] ) else: raise ValueError(F"""Unknown mobilevit_name: {mobilevit_name}""" ) assert torch.allclose(logits[0, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ) Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE ) print(F"""Saving model {mobilevit_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(_SCREAMING_SNAKE_CASE ) print(F"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(_SCREAMING_SNAKE_CASE ) if push_to_hub: SCREAMING_SNAKE_CASE = { """mobilevit_s""": """mobilevit-small""", """mobilevit_xs""": """mobilevit-x-small""", """mobilevit_xxs""": """mobilevit-xx-small""", """deeplabv3_mobilevit_s""": """deeplabv3-mobilevit-small""", """deeplabv3_mobilevit_xs""": """deeplabv3-mobilevit-x-small""", """deeplabv3_mobilevit_xxs""": """deeplabv3-mobilevit-xx-small""", } print("""Pushing to the hub...""" ) SCREAMING_SNAKE_CASE = model_mapping[mobilevit_name] image_processor.push_to_hub(_SCREAMING_SNAKE_CASE , organization="""apple""" ) model.push_to_hub(_SCREAMING_SNAKE_CASE , organization="""apple""" ) if __name__ == "__main__": SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--mobilevit_name""", default="""mobilevit_s""", type=str, help=( """Name of the MobileViT model you'd like to convert. Should be one of 'mobilevit_s', 'mobilevit_xs',""" """ 'mobilevit_xxs', 'deeplabv3_mobilevit_s', 'deeplabv3_mobilevit_xs', 'deeplabv3_mobilevit_xxs'.""" ), ) parser.add_argument( """--checkpoint_path""", required=True, type=str, help="""Path to the original state dict (.pt file).""" ) parser.add_argument( """--pytorch_dump_folder_path""", required=True, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) SCREAMING_SNAKE_CASE_ = parser.parse_args() convert_movilevit_checkpoint( args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub )
296
0
import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_albert import AlbertTokenizer else: UpperCAmelCase_ = None UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""} UpperCAmelCase_ = { """vocab_file""": { """albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/spiece.model""", """albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/spiece.model""", """albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model""", """albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model""", """albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/spiece.model""", """albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/spiece.model""", """albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model""", """albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model""", }, """tokenizer_file""": { """albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json""", """albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json""", """albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json""", """albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json""", """albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json""", """albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json""", """albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json""", """albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json""", }, } UpperCAmelCase_ = { """albert-base-v1""": 512, """albert-large-v1""": 512, """albert-xlarge-v1""": 512, """albert-xxlarge-v1""": 512, """albert-base-v2""": 512, """albert-large-v2""": 512, """albert-xlarge-v2""": 512, """albert-xxlarge-v2""": 512, } UpperCAmelCase_ = """▁""" class UpperCamelCase_ ( _lowerCamelCase ): lowerCAmelCase_ = VOCAB_FILES_NAMES lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCAmelCase_ = AlbertTokenizer def __init__( self , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=False , lowerCAmelCase_="[CLS]" , lowerCAmelCase_="[SEP]" , lowerCAmelCase_="<unk>" , lowerCAmelCase_="[SEP]" , lowerCAmelCase_="<pad>" , lowerCAmelCase_="[CLS]" , lowerCAmelCase_="[MASK]" , **lowerCAmelCase_ , ) -> Dict: # Mask token behave like a normal word, i.e. include the space before it and # is included in the raw text, there should be a match in a non-normalized sentence. _snake_case = ( AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ , normalized=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else mask_token ) super().__init__( lowerCAmelCase_ , tokenizer_file=lowerCAmelCase_ , do_lower_case=lowerCAmelCase_ , remove_space=lowerCAmelCase_ , keep_accents=lowerCAmelCase_ , bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , cls_token=lowerCAmelCase_ , mask_token=lowerCAmelCase_ , **lowerCAmelCase_ , ) _snake_case = do_lower_case _snake_case = remove_space _snake_case = keep_accents _snake_case = vocab_file _snake_case = False if not self.vocab_file else True def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None ) -> List[int]: _snake_case = [self.sep_token_id] _snake_case = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None ) -> List[int]: _snake_case = [self.sep_token_id] _snake_case = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None ) -> Tuple[str]: if not self.can_save_slow_tokenizer: raise ValueError( 'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ' 'tokenizer.' ) if not os.path.isdir(lowerCAmelCase_ ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return _snake_case = os.path.join( lowerCAmelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase_ ): copyfile(self.vocab_file , lowerCAmelCase_ ) return (out_vocab_file,)
295
from typing import Optional import torch import torch.utils.checkpoint from torch import Tensor, nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...modeling_outputs import ( BackboneOutput, BaseModelOutputWithNoAttention, BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention, ) from ...modeling_utils import PreTrainedModel from ...utils import ( add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from ...utils.backbone_utils import BackboneMixin from .configuration_resnet import ResNetConfig UpperCAmelCase_ = logging.get_logger(__name__) # General docstring UpperCAmelCase_ = """ResNetConfig""" # Base docstring UpperCAmelCase_ = """microsoft/resnet-50""" UpperCAmelCase_ = [1, 2048, 7, 7] # Image classification docstring UpperCAmelCase_ = """microsoft/resnet-50""" UpperCAmelCase_ = """tiger cat""" UpperCAmelCase_ = [ """microsoft/resnet-50""", # See all resnet models at https://huggingface.co/models?filter=resnet ] class UpperCamelCase_ ( nn.Module ): def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 3 , lowerCAmelCase_ = 1 , lowerCAmelCase_ = "relu" ) -> Union[str, Any]: super().__init__() _snake_case = nn.Convad( lowerCAmelCase_ , lowerCAmelCase_ , kernel_size=lowerCAmelCase_ , stride=lowerCAmelCase_ , padding=kernel_size // 2 , bias=lowerCAmelCase_ ) _snake_case = nn.BatchNormad(lowerCAmelCase_ ) _snake_case = ACTaFN[activation] if activation is not None else nn.Identity() def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Tensor: _snake_case = self.convolution(lowerCAmelCase_ ) _snake_case = self.normalization(lowerCAmelCase_ ) _snake_case = self.activation(lowerCAmelCase_ ) return hidden_state class UpperCamelCase_ ( nn.Module ): def __init__( self , lowerCAmelCase_ ) -> Dict: super().__init__() _snake_case = ResNetConvLayer( config.num_channels , config.embedding_size , kernel_size=7 , stride=2 , activation=config.hidden_act ) _snake_case = nn.MaxPoolad(kernel_size=3 , stride=2 , padding=1 ) _snake_case = config.num_channels def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Tensor: _snake_case = pixel_values.shape[1] if num_channels != self.num_channels: raise ValueError( 'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' ) _snake_case = self.embedder(lowerCAmelCase_ ) _snake_case = self.pooler(lowerCAmelCase_ ) return embedding class UpperCamelCase_ ( nn.Module ): def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 2 ) -> List[Any]: super().__init__() _snake_case = nn.Convad(lowerCAmelCase_ , lowerCAmelCase_ , kernel_size=1 , stride=lowerCAmelCase_ , bias=lowerCAmelCase_ ) _snake_case = nn.BatchNormad(lowerCAmelCase_ ) def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Tensor: _snake_case = self.convolution(lowerCAmelCase_ ) _snake_case = self.normalization(lowerCAmelCase_ ) return hidden_state class UpperCamelCase_ ( nn.Module ): def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 1 , lowerCAmelCase_ = "relu" ) -> Any: super().__init__() _snake_case = in_channels != out_channels or stride != 1 _snake_case = ( ResNetShortCut(lowerCAmelCase_ , lowerCAmelCase_ , stride=lowerCAmelCase_ ) if should_apply_shortcut else nn.Identity() ) _snake_case = nn.Sequential( ResNetConvLayer(lowerCAmelCase_ , lowerCAmelCase_ , stride=lowerCAmelCase_ ) , ResNetConvLayer(lowerCAmelCase_ , lowerCAmelCase_ , activation=lowerCAmelCase_ ) , ) _snake_case = ACTaFN[activation] def lowerCAmelCase ( self , lowerCAmelCase_ ) -> str: _snake_case = hidden_state _snake_case = self.layer(lowerCAmelCase_ ) _snake_case = self.shortcut(lowerCAmelCase_ ) hidden_state += residual _snake_case = self.activation(lowerCAmelCase_ ) return hidden_state class UpperCamelCase_ ( nn.Module ): def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 1 , lowerCAmelCase_ = "relu" , lowerCAmelCase_ = 4 ) -> List[str]: super().__init__() _snake_case = in_channels != out_channels or stride != 1 _snake_case = out_channels // reduction _snake_case = ( ResNetShortCut(lowerCAmelCase_ , lowerCAmelCase_ , stride=lowerCAmelCase_ ) if should_apply_shortcut else nn.Identity() ) _snake_case = nn.Sequential( ResNetConvLayer(lowerCAmelCase_ , lowerCAmelCase_ , kernel_size=1 ) , ResNetConvLayer(lowerCAmelCase_ , lowerCAmelCase_ , stride=lowerCAmelCase_ ) , ResNetConvLayer(lowerCAmelCase_ , lowerCAmelCase_ , kernel_size=1 , activation=lowerCAmelCase_ ) , ) _snake_case = ACTaFN[activation] def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Optional[Any]: _snake_case = hidden_state _snake_case = self.layer(lowerCAmelCase_ ) _snake_case = self.shortcut(lowerCAmelCase_ ) hidden_state += residual _snake_case = self.activation(lowerCAmelCase_ ) return hidden_state class UpperCamelCase_ ( nn.Module ): def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 2 , lowerCAmelCase_ = 2 , ) -> Tuple: super().__init__() _snake_case = ResNetBottleNeckLayer if config.layer_type == 'bottleneck' else ResNetBasicLayer _snake_case = nn.Sequential( # downsampling is done in the first layer with stride of 2 layer(lowerCAmelCase_ , lowerCAmelCase_ , stride=lowerCAmelCase_ , activation=config.hidden_act ) , *[layer(lowerCAmelCase_ , lowerCAmelCase_ , activation=config.hidden_act ) for _ in range(depth - 1 )] , ) def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Tensor: _snake_case = input for layer in self.layers: _snake_case = layer(lowerCAmelCase_ ) return hidden_state class UpperCamelCase_ ( nn.Module ): def __init__( self , lowerCAmelCase_ ) -> int: super().__init__() _snake_case = nn.ModuleList([] ) # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input self.stages.append( ResNetStage( lowerCAmelCase_ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) ) _snake_case = zip(config.hidden_sizes , config.hidden_sizes[1:] ) for (in_channels, out_channels), depth in zip(lowerCAmelCase_ , config.depths[1:] ): self.stages.append(ResNetStage(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , depth=lowerCAmelCase_ ) ) def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = False , lowerCAmelCase_ = True ) -> BaseModelOutputWithNoAttention: _snake_case = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: _snake_case = hidden_states + (hidden_state,) _snake_case = stage_module(lowerCAmelCase_ ) if output_hidden_states: _snake_case = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return BaseModelOutputWithNoAttention( last_hidden_state=lowerCAmelCase_ , hidden_states=lowerCAmelCase_ , ) class UpperCamelCase_ ( _lowerCamelCase ): lowerCAmelCase_ = ResNetConfig lowerCAmelCase_ = '''resnet''' lowerCAmelCase_ = '''pixel_values''' lowerCAmelCase_ = True def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Dict: if isinstance(lowerCAmelCase_ , nn.Convad ): nn.init.kaiming_normal_(module.weight , mode='fan_out' , nonlinearity='relu' ) elif isinstance(lowerCAmelCase_ , (nn.BatchNormad, nn.GroupNorm) ): nn.init.constant_(module.weight , 1 ) nn.init.constant_(module.bias , 0 ) def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=False ) -> List[Any]: if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): _snake_case = value UpperCAmelCase_ = R""" This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`ResNetConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ UpperCAmelCase_ = R""" Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ConvNextImageProcessor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( '''The bare ResNet model outputting raw features without any specific head on top.''' , _lowerCamelCase , ) class UpperCamelCase_ ( _lowerCamelCase ): def __init__( self , lowerCAmelCase_ ) -> int: super().__init__(lowerCAmelCase_ ) _snake_case = config _snake_case = ResNetEmbeddings(lowerCAmelCase_ ) _snake_case = ResNetEncoder(lowerCAmelCase_ ) _snake_case = nn.AdaptiveAvgPoolad((1, 1) ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(lowerCAmelCase_ ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowerCAmelCase_ , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = None ) -> BaseModelOutputWithPoolingAndNoAttention: _snake_case = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) _snake_case = return_dict if return_dict is not None else self.config.use_return_dict _snake_case = self.embedder(lowerCAmelCase_ ) _snake_case = self.encoder( lowerCAmelCase_ , output_hidden_states=lowerCAmelCase_ , return_dict=lowerCAmelCase_ ) _snake_case = encoder_outputs[0] _snake_case = self.pooler(lowerCAmelCase_ ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=lowerCAmelCase_ , pooler_output=lowerCAmelCase_ , hidden_states=encoder_outputs.hidden_states , ) @add_start_docstrings( ''' ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for ImageNet. ''' , _lowerCamelCase , ) class UpperCamelCase_ ( _lowerCamelCase ): def __init__( self , lowerCAmelCase_ ) -> Union[str, Any]: super().__init__(lowerCAmelCase_ ) _snake_case = config.num_labels _snake_case = ResNetModel(lowerCAmelCase_ ) # classification head _snake_case = nn.Sequential( nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , ) # initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(lowerCAmelCase_ ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowerCAmelCase_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def lowerCAmelCase ( self , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , ) -> ImageClassifierOutputWithNoAttention: _snake_case = return_dict if return_dict is not None else self.config.use_return_dict _snake_case = self.resnet(lowerCAmelCase_ , output_hidden_states=lowerCAmelCase_ , return_dict=lowerCAmelCase_ ) _snake_case = outputs.pooler_output if return_dict else outputs[1] _snake_case = self.classifier(lowerCAmelCase_ ) _snake_case = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: _snake_case = 'regression' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): _snake_case = 'single_label_classification' else: _snake_case = 'multi_label_classification' if self.config.problem_type == "regression": _snake_case = MSELoss() if self.num_labels == 1: _snake_case = loss_fct(logits.squeeze() , labels.squeeze() ) else: _snake_case = loss_fct(lowerCAmelCase_ , lowerCAmelCase_ ) elif self.config.problem_type == "single_label_classification": _snake_case = CrossEntropyLoss() _snake_case = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": _snake_case = BCEWithLogitsLoss() _snake_case = loss_fct(lowerCAmelCase_ , lowerCAmelCase_ ) if not return_dict: _snake_case = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return ImageClassifierOutputWithNoAttention(loss=lowerCAmelCase_ , logits=lowerCAmelCase_ , hidden_states=outputs.hidden_states ) @add_start_docstrings( ''' ResNet backbone, to be used with frameworks like DETR and MaskFormer. ''' , _lowerCamelCase , ) class UpperCamelCase_ ( _lowerCamelCase , _lowerCamelCase ): def __init__( self , lowerCAmelCase_ ) -> List[Any]: super().__init__(lowerCAmelCase_ ) super()._init_backbone(lowerCAmelCase_ ) _snake_case = [config.embedding_size] + config.hidden_sizes _snake_case = ResNetEmbeddings(lowerCAmelCase_ ) _snake_case = ResNetEncoder(lowerCAmelCase_ ) # initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(lowerCAmelCase_ ) @replace_return_docstrings(output_type=lowerCAmelCase_ , config_class=_CONFIG_FOR_DOC ) def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = None ) -> BackboneOutput: _snake_case = return_dict if return_dict is not None else self.config.use_return_dict _snake_case = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) _snake_case = self.embedder(lowerCAmelCase_ ) _snake_case = self.encoder(lowerCAmelCase_ , output_hidden_states=lowerCAmelCase_ , return_dict=lowerCAmelCase_ ) _snake_case = outputs.hidden_states _snake_case = () for idx, stage in enumerate(self.stage_names ): if stage in self.out_features: feature_maps += (hidden_states[idx],) if not return_dict: _snake_case = (feature_maps,) if output_hidden_states: output += (outputs.hidden_states,) return output return BackboneOutput( feature_maps=lowerCAmelCase_ , hidden_states=outputs.hidden_states if output_hidden_states else None , attentions=lowerCAmelCase_ , )
295
1
'''simple docstring''' import argparse import os import sys from unittest.mock import patch import pytorch_lightning as pl import timeout_decorator import torch from distillation import SummarizationDistiller, distill_main from finetune import SummarizationModule, main from transformers import MarianMTModel from transformers.file_utils import cached_path from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow from utils import load_json _UpperCAmelCase : Optional[Any] = """sshleifer/mar_enro_6_3_student""" class a__ ( __A ): """simple docstring""" def _snake_case (self ): super().setUp() __lowerCAmelCase = cached_path( '''https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz''' , extract_compressed_file=__lowercase , ) __lowerCAmelCase = F"""{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k""" @slow @require_torch_gpu def _snake_case (self ): MarianMTModel.from_pretrained(__lowercase ) @slow @require_torch_gpu def _snake_case (self ): __lowerCAmelCase = { '''$MAX_LEN''': 64, '''$BS''': 64, '''$GAS''': 1, '''$ENRO_DIR''': self.data_dir, '''facebook/mbart-large-cc25''': MARIAN_MODEL, # "val_check_interval=0.25": "val_check_interval=1.0", '''--learning_rate=3e-5''': '''--learning_rate 3e-4''', '''--num_train_epochs 6''': '''--num_train_epochs 1''', } # Clean up bash script __lowerCAmelCase = (self.test_file_dir / '''train_mbart_cc25_enro.sh''').open().read().split('''finetune.py''' )[1].strip() __lowerCAmelCase = bash_script.replace('''\\\n''' , '''''' ).strip().replace('''"$@"''' , '''''' ) for k, v in env_vars_to_replace.items(): __lowerCAmelCase = bash_script.replace(__lowercase , str(__lowercase ) ) __lowerCAmelCase = self.get_auto_remove_tmp_dir() # bash_script = bash_script.replace("--fp16 ", "") __lowerCAmelCase = F""" --output_dir {output_dir} --tokenizer_name Helsinki-NLP/opus-mt-en-ro --sortish_sampler --do_predict --gpus 1 --freeze_encoder --n_train 40000 --n_val 500 --n_test 500 --fp16_opt_level O1 --num_sanity_val_steps 0 --eval_beams 2 """.split() # XXX: args.gpus > 1 : handle multi_gpu in the future __lowerCAmelCase = ['''finetune.py'''] + bash_script.split() + args with patch.object(__lowercase , '''argv''' , __lowercase ): __lowerCAmelCase = argparse.ArgumentParser() __lowerCAmelCase = pl.Trainer.add_argparse_args(__lowercase ) __lowerCAmelCase = SummarizationModule.add_model_specific_args(__lowercase , os.getcwd() ) __lowerCAmelCase = parser.parse_args() __lowerCAmelCase = main(__lowercase ) # Check metrics __lowerCAmelCase = load_json(model.metrics_save_path ) __lowerCAmelCase = metrics['''val'''][0] __lowerCAmelCase = metrics['''val'''][-1] self.assertEqual(len(metrics['''val'''] ) , (args.max_epochs / args.val_check_interval) ) assert isinstance(last_step_stats[F"""val_avg_{model.val_metric}"""] , __lowercase ) self.assertGreater(last_step_stats['''val_avg_gen_time'''] , 0.0_1 ) # model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?) self.assertLessEqual(last_step_stats['''val_avg_gen_time'''] , 1.0 ) # test learning requirements: # 1. BLEU improves over the course of training by more than 2 pts self.assertGreater(last_step_stats['''val_avg_bleu'''] - first_step_stats['''val_avg_bleu'''] , 2 ) # 2. BLEU finishes above 17 self.assertGreater(last_step_stats['''val_avg_bleu'''] , 17 ) # 3. test BLEU and val BLEU within ~1.1 pt. self.assertLess(abs(metrics['''val'''][-1]['''val_avg_bleu'''] - metrics['''test'''][-1]['''test_avg_bleu'''] ) , 1.1 ) # check lightning ckpt can be loaded and has a reasonable statedict __lowerCAmelCase = os.listdir(__lowercase ) __lowerCAmelCase = [x for x in contents if x.endswith('''.ckpt''' )][0] __lowerCAmelCase = os.path.join(args.output_dir , __lowercase ) __lowerCAmelCase = torch.load(__lowercase , map_location='''cpu''' ) __lowerCAmelCase = '''model.model.decoder.layers.0.encoder_attn_layer_norm.weight''' assert expected_key in ckpt["state_dict"] assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa # TODO: turn on args.do_predict when PL bug fixed. if args.do_predict: __lowerCAmelCase = {os.path.basename(__lowercase ) for p in contents} assert "test_generations.txt" in contents assert "test_results.txt" in contents # assert len(metrics["val"]) == desired_n_evals assert len(metrics['''test'''] ) == 1 class a__ ( __A ): """simple docstring""" @timeout_decorator.timeout(6_00 ) @slow @require_torch_gpu def _snake_case (self ): __lowerCAmelCase = F"""{self.test_file_dir_str}/test_data/wmt_en_ro""" __lowerCAmelCase = { '''--fp16_opt_level=O1''': '''''', '''$MAX_LEN''': 1_28, '''$BS''': 16, '''$GAS''': 1, '''$ENRO_DIR''': data_dir, '''$m''': '''sshleifer/student_marian_en_ro_6_1''', '''val_check_interval=0.25''': '''val_check_interval=1.0''', } # Clean up bash script __lowerCAmelCase = ( (self.test_file_dir / '''distil_marian_no_teacher.sh''').open().read().split('''distillation.py''' )[1].strip() ) __lowerCAmelCase = bash_script.replace('''\\\n''' , '''''' ).strip().replace('''"$@"''' , '''''' ) __lowerCAmelCase = bash_script.replace('''--fp16 ''' , ''' ''' ) for k, v in env_vars_to_replace.items(): __lowerCAmelCase = bash_script.replace(__lowercase , str(__lowercase ) ) __lowerCAmelCase = self.get_auto_remove_tmp_dir() __lowerCAmelCase = bash_script.replace('''--fp16''' , '''''' ) __lowerCAmelCase = 6 __lowerCAmelCase = ( ['''distillation.py'''] + bash_script.split() + [ F"""--output_dir={output_dir}""", '''--gpus=1''', '''--learning_rate=1e-3''', F"""--num_train_epochs={epochs}""", '''--warmup_steps=10''', '''--val_check_interval=1.0''', '''--do_predict''', ] ) with patch.object(__lowercase , '''argv''' , __lowercase ): __lowerCAmelCase = argparse.ArgumentParser() __lowerCAmelCase = pl.Trainer.add_argparse_args(__lowercase ) __lowerCAmelCase = SummarizationDistiller.add_model_specific_args(__lowercase , os.getcwd() ) __lowerCAmelCase = parser.parse_args() # assert args.gpus == gpus THIS BREAKS for multi_gpu __lowerCAmelCase = distill_main(__lowercase ) # Check metrics __lowerCAmelCase = load_json(model.metrics_save_path ) __lowerCAmelCase = metrics['''val'''][0] __lowerCAmelCase = metrics['''val'''][-1] assert len(metrics['''val'''] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check assert last_step_stats["val_avg_gen_time"] >= 0.0_1 assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"] # model learned nothing assert 1.0 >= last_step_stats["val_avg_gen_time"] # model hanging on generate. Maybe bad config was saved. assert isinstance(last_step_stats[F"""val_avg_{model.val_metric}"""] , __lowercase ) # check lightning ckpt can be loaded and has a reasonable statedict __lowerCAmelCase = os.listdir(__lowercase ) __lowerCAmelCase = [x for x in contents if x.endswith('''.ckpt''' )][0] __lowerCAmelCase = os.path.join(args.output_dir , __lowercase ) __lowerCAmelCase = torch.load(__lowercase , map_location='''cpu''' ) __lowerCAmelCase = '''model.model.decoder.layers.0.encoder_attn_layer_norm.weight''' assert expected_key in ckpt["state_dict"] assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa # TODO: turn on args.do_predict when PL bug fixed. if args.do_predict: __lowerCAmelCase = {os.path.basename(__lowercase ) for p in contents} assert "test_generations.txt" in contents assert "test_results.txt" in contents # assert len(metrics["val"]) == desired_n_evals assert len(metrics['''test'''] ) == 1
174
'''simple docstring''' from math import factorial, radians def __magic_name__( lowerCamelCase, lowerCamelCase = 1_8, lowerCamelCase = 1_0): __lowerCAmelCase = angle_in_degrees - ((angle_in_degrees // 3_60.0) * 3_60.0) # Converting from degrees to radians __lowerCAmelCase = radians(lowerCamelCase) __lowerCAmelCase = angle_in_radians __lowerCAmelCase = 3 __lowerCAmelCase = -1 for _ in range(lowerCamelCase): result += (b * (angle_in_radians**a)) / factorial(lowerCamelCase) __lowerCAmelCase = -b # One positive term and the next will be negative and so on... a += 2 # Increased by 2 for every term. return round(lowerCamelCase, lowerCamelCase) if __name__ == "__main__": __import__("""doctest""").testmod()
174
1
import unittest from diffusers import FlaxAutoencoderKL from diffusers.utils import is_flax_available from diffusers.utils.testing_utils import require_flax from .test_modeling_common_flax import FlaxModelTesterMixin if is_flax_available(): import jax @require_flax class __A ( lowerCAmelCase , unittest.TestCase ): lowerCAmelCase_ : List[Any] = FlaxAutoencoderKL @property def lowercase__ ( self : Tuple ): lowerCAmelCase : Dict = 4 lowerCAmelCase : str = 3 lowerCAmelCase : Optional[Any] = (32, 32) lowerCAmelCase : Any = jax.random.PRNGKey(0 ) lowerCAmelCase : List[str] = jax.random.uniform(UpperCAmelCase_ , ((batch_size, num_channels) + sizes) ) return {"sample": image, "prng_key": prng_key} def lowercase__ ( self : Dict ): lowerCAmelCase : Any = { 'block_out_channels': [32, 64], 'in_channels': 3, 'out_channels': 3, 'down_block_types': ['DownEncoderBlock2D', 'DownEncoderBlock2D'], 'up_block_types': ['UpDecoderBlock2D', 'UpDecoderBlock2D'], 'latent_channels': 4, } lowerCAmelCase : Optional[Any] = self.dummy_input return init_dict, inputs_dict
370
import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import CLIPImageProcessor, CLIPProcessor @require_vision class __A ( unittest.TestCase ): def lowercase__ ( self : Optional[int] ): lowerCAmelCase : Tuple = tempfile.mkdtemp() # fmt: off lowerCAmelCase : List[Any] = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>'] # fmt: on lowerCAmelCase : str = dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_ ) ) ) ) lowerCAmelCase : Optional[Any] = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', ''] lowerCAmelCase : Tuple = {'unk_token': '<unk>'} lowerCAmelCase : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) lowerCAmelCase : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp: fp.write(json.dumps(UpperCAmelCase_ ) + '\n' ) with open(self.merges_file , 'w' , encoding='utf-8' ) as fp: fp.write('\n'.join(UpperCAmelCase_ ) ) lowerCAmelCase : Dict = { 'do_resize': True, 'size': 20, 'do_center_crop': True, 'crop_size': 18, 'do_normalize': True, 'image_mean': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73], 'image_std': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11], } lowerCAmelCase : Union[str, Any] = os.path.join(self.tmpdirname , UpperCAmelCase_ ) with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp: json.dump(UpperCAmelCase_ , UpperCAmelCase_ ) def lowercase__ ( self : Any , **UpperCAmelCase_ : Dict ): return CLIPTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase_ ) def lowercase__ ( self : Tuple , **UpperCAmelCase_ : str ): return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **UpperCAmelCase_ ) def lowercase__ ( self : Optional[int] , **UpperCAmelCase_ : Optional[int] ): return CLIPImageProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase_ ) def lowercase__ ( self : Union[str, Any] ): shutil.rmtree(self.tmpdirname ) def lowercase__ ( self : List[str] ): lowerCAmelCase : str = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] lowerCAmelCase : List[Any] = [Image.fromarray(np.moveaxis(UpperCAmelCase_ , 0 , -1 ) ) for x in image_inputs] return image_inputs def lowercase__ ( self : Any ): lowerCAmelCase : List[str] = self.get_tokenizer() lowerCAmelCase : List[str] = self.get_rust_tokenizer() lowerCAmelCase : Optional[int] = self.get_image_processor() lowerCAmelCase : Optional[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_ ) processor_slow.save_pretrained(self.tmpdirname ) lowerCAmelCase : int = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCAmelCase_ ) lowerCAmelCase : Optional[int] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_ ) processor_fast.save_pretrained(self.tmpdirname ) lowerCAmelCase : Dict = CLIPProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , UpperCAmelCase_ ) self.assertIsInstance(processor_fast.tokenizer , UpperCAmelCase_ ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , UpperCAmelCase_ ) self.assertIsInstance(processor_fast.image_processor , UpperCAmelCase_ ) def lowercase__ ( self : Tuple ): lowerCAmelCase : Any = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) lowerCAmelCase : Tuple = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' ) lowerCAmelCase : Union[str, Any] = self.get_image_processor(do_normalize=UpperCAmelCase_ , padding_value=1.0 ) lowerCAmelCase : Dict = CLIPProcessor.from_pretrained( self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=UpperCAmelCase_ , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , UpperCAmelCase_ ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , UpperCAmelCase_ ) def lowercase__ ( self : List[str] ): lowerCAmelCase : Any = self.get_image_processor() lowerCAmelCase : Union[str, Any] = self.get_tokenizer() lowerCAmelCase : str = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_ ) lowerCAmelCase : Dict = self.prepare_image_inputs() lowerCAmelCase : List[str] = image_processor(UpperCAmelCase_ , return_tensors='np' ) lowerCAmelCase : int = processor(images=UpperCAmelCase_ , return_tensors='np' ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 ) def lowercase__ ( self : Union[str, Any] ): lowerCAmelCase : Union[str, Any] = self.get_image_processor() lowerCAmelCase : Union[str, Any] = self.get_tokenizer() lowerCAmelCase : Dict = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_ ) lowerCAmelCase : Optional[int] = 'lower newer' lowerCAmelCase : List[str] = processor(text=UpperCAmelCase_ ) lowerCAmelCase : Union[str, Any] = tokenizer(UpperCAmelCase_ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def lowercase__ ( self : Optional[Any] ): lowerCAmelCase : Tuple = self.get_image_processor() lowerCAmelCase : Dict = self.get_tokenizer() lowerCAmelCase : List[str] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_ ) lowerCAmelCase : Optional[Any] = 'lower newer' lowerCAmelCase : Optional[int] = self.prepare_image_inputs() lowerCAmelCase : Union[str, Any] = processor(text=UpperCAmelCase_ , images=UpperCAmelCase_ ) self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask', 'pixel_values'] ) # test if it raises when no input is passed with pytest.raises(UpperCAmelCase_ ): processor() def lowercase__ ( self : List[str] ): lowerCAmelCase : Optional[Any] = self.get_image_processor() lowerCAmelCase : str = self.get_tokenizer() lowerCAmelCase : Union[str, Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_ ) lowerCAmelCase : List[str] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] lowerCAmelCase : Any = processor.batch_decode(UpperCAmelCase_ ) lowerCAmelCase : List[Any] = tokenizer.batch_decode(UpperCAmelCase_ ) self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ ) def lowercase__ ( self : Union[str, Any] ): lowerCAmelCase : List[Any] = self.get_image_processor() lowerCAmelCase : Dict = self.get_tokenizer() lowerCAmelCase : List[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_ ) lowerCAmelCase : Dict = 'lower newer' lowerCAmelCase : Tuple = self.prepare_image_inputs() lowerCAmelCase : List[str] = processor(text=UpperCAmelCase_ , images=UpperCAmelCase_ ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
323
0
import argparse from typing import Dict import tensorflow as tf import torch from tqdm import tqdm from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration a_ = [ # tf -> hf ("""/""", """."""), ("""layer_""", """layers."""), ("""kernel""", """weight"""), ("""beta""", """bias"""), ("""gamma""", """weight"""), ("""pegasus""", """model"""), ] a_ = [ (""".output.dense""", """.fc2"""), ("""intermediate.LayerNorm""", """final_layer_norm"""), ("""intermediate.dense""", """fc1"""), ] a_ = ( INIT_COMMON + [ ("""attention.self.LayerNorm""", """self_attn_layer_norm"""), ("""attention.output.dense""", """self_attn.out_proj"""), ("""attention.self""", """self_attn"""), ("""attention.encdec.LayerNorm""", """encoder_attn_layer_norm"""), ("""attention.encdec_output.dense""", """encoder_attn.out_proj"""), ("""attention.encdec""", """encoder_attn"""), ("""key""", """k_proj"""), ("""value""", """v_proj"""), ("""query""", """q_proj"""), ("""decoder.LayerNorm""", """decoder.layernorm_embedding"""), ] + END_COMMON ) a_ = ( INIT_COMMON + [ ("""embeddings.word_embeddings""", """shared.weight"""), ("""embeddings.position_embeddings""", """embed_positions.weight"""), ("""attention.self.LayerNorm""", """self_attn_layer_norm"""), ("""attention.output.dense""", """self_attn.output"""), ("""attention.self""", """self_attn.self"""), ("""encoder.LayerNorm""", """encoder.layernorm_embedding"""), ] + END_COMMON ) a_ = [ """encdec/key/bias""", """encdec/query/bias""", """encdec/value/bias""", """self/key/bias""", """self/query/bias""", """self/value/bias""", """encdec_output/dense/bias""", """attention/output/dense/bias""", ] def a__ ( _UpperCamelCase : Optional[int] ,_UpperCamelCase : Optional[Any] ): for tf_name, hf_name in patterns: __lowerCamelCase = k.replace(_UpperCamelCase ,_UpperCamelCase ) return k def a__ ( _UpperCamelCase : dict ,_UpperCamelCase : dict ): __lowerCamelCase = BigBirdPegasusConfig(**_UpperCamelCase ) __lowerCamelCase = BigBirdPegasusForConditionalGeneration(_UpperCamelCase ) __lowerCamelCase = torch_model.state_dict() __lowerCamelCase = {} # separating decoder weights __lowerCamelCase = {k: tf_weights[k] for k in tf_weights if k.startswith('''pegasus/decoder''' )} __lowerCamelCase = {k: tf_weights[k] for k in tf_weights if not k.startswith('''pegasus/decoder''' )} for k, v in tqdm(decoder_weights.items() ,'''tf -> hf conversion''' ): __lowerCamelCase = [k.endswith(_UpperCamelCase ) for ending in KEYS_TO_IGNORE] if any(_UpperCamelCase ): continue __lowerCamelCase = DECODER_PATTERNS __lowerCamelCase = rename_state_dict_key(_UpperCamelCase ,_UpperCamelCase ) if new_k not in state_dict: raise ValueError(F"""could not find new key {new_k} in state dict. (converted from {k})""" ) if any(True if i in k else False for i in ['''dense''', '''query''', '''key''', '''value'''] ): __lowerCamelCase = v.T __lowerCamelCase = torch.from_numpy(_UpperCamelCase ) assert v.shape == state_dict[new_k].shape, F"""{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}""" for k, v in tqdm(remaining_weights.items() ,'''tf -> hf conversion''' ): __lowerCamelCase = [k.endswith(_UpperCamelCase ) for ending in KEYS_TO_IGNORE] if any(_UpperCamelCase ): continue __lowerCamelCase = REMAINING_PATTERNS __lowerCamelCase = rename_state_dict_key(_UpperCamelCase ,_UpperCamelCase ) if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings": raise ValueError(F"""could not find new key {new_k} in state dict. (converted from {k})""" ) if any(True if i in k else False for i in ['''dense''', '''query''', '''key''', '''value'''] ): __lowerCamelCase = v.T __lowerCamelCase = torch.from_numpy(_UpperCamelCase ) if k != "pegasus/embeddings/position_embeddings": assert v.shape == state_dict[new_k].shape, F"""{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}""" __lowerCamelCase = mapping['''model.embed_positions.weight'''] __lowerCamelCase = mapping.pop('''model.embed_positions.weight''' ) __lowerCamelCase ,__lowerCamelCase = torch_model.load_state_dict(_UpperCamelCase ,strict=_UpperCamelCase ) __lowerCamelCase = [ k for k in missing if k not in [ '''final_logits_bias''', '''model.encoder.embed_tokens.weight''', '''model.decoder.embed_tokens.weight''', '''lm_head.weight''', ] ] assert unexpected_missing == [], F"""no matches found for the following torch keys {unexpected_missing}""" assert extra == [], F"""no matches found for the following tf keys {extra}""" return torch_model def a__ ( _UpperCamelCase : int ): __lowerCamelCase = tf.train.list_variables(_UpperCamelCase ) __lowerCamelCase = {} __lowerCamelCase = ['''global_step'''] for name, shape in tqdm(_UpperCamelCase ,desc='''converting tf checkpoint to dict''' ): __lowerCamelCase = any(pat in name for pat in ignore_name ) if skip_key: continue __lowerCamelCase = tf.train.load_variable(_UpperCamelCase ,_UpperCamelCase ) __lowerCamelCase = array return tf_weights def a__ ( _UpperCamelCase : str ,_UpperCamelCase : str ,_UpperCamelCase : dict ): __lowerCamelCase = get_tf_weights_as_numpy(_UpperCamelCase ) __lowerCamelCase = convert_bigbird_pegasus(_UpperCamelCase ,_UpperCamelCase ) torch_model.save_pretrained(_UpperCamelCase ) if __name__ == "__main__": a_ = argparse.ArgumentParser() parser.add_argument("""--tf_ckpt_path""", type=str, help="""passed to tf.train.list_variables""") parser.add_argument("""--save_dir""", default=None, type=str, help="""Path to the output PyTorch model.""") a_ = parser.parse_args() a_ = {} convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
330
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) a_ = {"""configuration_encoder_decoder""": ["""EncoderDecoderConfig"""]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = ["""EncoderDecoderModel"""] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = ["""TFEncoderDecoderModel"""] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = ["""FlaxEncoderDecoderModel"""] if TYPE_CHECKING: from .configuration_encoder_decoder import EncoderDecoderConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_encoder_decoder import EncoderDecoderModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_encoder_decoder import TFEncoderDecoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel else: import sys a_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
330
1
def A ( _lowerCamelCase , _lowerCamelCase ): '''simple docstring''' if density <= 0: raise ValueError("Impossible fluid density" ) if bulk_modulus <= 0: raise ValueError("Impossible bulk modulus" ) return (bulk_modulus / density) ** 0.5 if __name__ == "__main__": import doctest doctest.testmod()
300
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available, is_vision_available, ) _snake_case = {"configuration_beit": ["BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BeitConfig", "BeitOnnxConfig"]} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case = ["BeitFeatureExtractor"] _snake_case = ["BeitImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case = [ "BEIT_PRETRAINED_MODEL_ARCHIVE_LIST", "BeitForImageClassification", "BeitForMaskedImageModeling", "BeitForSemanticSegmentation", "BeitModel", "BeitPreTrainedModel", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case = [ "FlaxBeitForImageClassification", "FlaxBeitForMaskedImageModeling", "FlaxBeitModel", "FlaxBeitPreTrainedModel", ] if TYPE_CHECKING: from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_beit import BeitFeatureExtractor from .image_processing_beit import BeitImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_beit import ( BEIT_PRETRAINED_MODEL_ARCHIVE_LIST, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation, BeitModel, BeitPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_beit import ( FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel, FlaxBeitPreTrainedModel, ) else: import sys _snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
300
1
'''simple docstring''' _A : List[str] ='''Alexander Joslin''' import operator as op from .stack import Stack def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> int: lowerCamelCase__ : Dict = {"""*""": op.mul, """/""": op.truediv, """+""": op.add, """-""": op.sub} lowerCamelCase__ : Stack[int] = Stack() lowerCamelCase__ : Stack[str] = Stack() for i in equation: if i.isdigit(): # RULE 1 operand_stack.push(int(UpperCamelCase ) ) elif i in operators: # RULE 2 operator_stack.push(UpperCamelCase ) elif i == ")": # RULE 4 lowerCamelCase__ : Optional[Any] = operator_stack.peek() operator_stack.pop() lowerCamelCase__ : Dict = operand_stack.peek() operand_stack.pop() lowerCamelCase__ : List[str] = operand_stack.peek() operand_stack.pop() lowerCamelCase__ : Optional[int] = operators[opr](UpperCamelCase , UpperCamelCase ) operand_stack.push(UpperCamelCase ) # RULE 5 return operand_stack.peek() if __name__ == "__main__": _A : Optional[Any] ='''(5 + ((4 * 2) * (2 + 3)))''' # answer = 45 print(F'{equation} = {dijkstras_two_stack_algorithm(equation)}')
41
'''simple docstring''' import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( ConditionalDetrConfig, ConditionalDetrForObjectDetection, ConditionalDetrForSegmentation, ConditionalDetrImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() _A : List[str] =logging.get_logger(__name__) # here we list all keys to be renamed (original name on the left, our name on the right) _A : Tuple =[] for i in range(6): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( (F'transformer.encoder.layers.{i}.self_attn.out_proj.weight', F'encoder.layers.{i}.self_attn.out_proj.weight') ) rename_keys.append( (F'transformer.encoder.layers.{i}.self_attn.out_proj.bias', F'encoder.layers.{i}.self_attn.out_proj.bias') ) rename_keys.append((F'transformer.encoder.layers.{i}.linear1.weight', F'encoder.layers.{i}.fc1.weight')) rename_keys.append((F'transformer.encoder.layers.{i}.linear1.bias', F'encoder.layers.{i}.fc1.bias')) rename_keys.append((F'transformer.encoder.layers.{i}.linear2.weight', F'encoder.layers.{i}.fc2.weight')) rename_keys.append((F'transformer.encoder.layers.{i}.linear2.bias', F'encoder.layers.{i}.fc2.bias')) rename_keys.append( (F'transformer.encoder.layers.{i}.norm1.weight', F'encoder.layers.{i}.self_attn_layer_norm.weight') ) rename_keys.append((F'transformer.encoder.layers.{i}.norm1.bias', F'encoder.layers.{i}.self_attn_layer_norm.bias')) rename_keys.append((F'transformer.encoder.layers.{i}.norm2.weight', F'encoder.layers.{i}.final_layer_norm.weight')) rename_keys.append((F'transformer.encoder.layers.{i}.norm2.bias', F'encoder.layers.{i}.final_layer_norm.bias')) # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms rename_keys.append( (F'transformer.decoder.layers.{i}.self_attn.out_proj.weight', F'decoder.layers.{i}.self_attn.out_proj.weight') ) rename_keys.append( (F'transformer.decoder.layers.{i}.self_attn.out_proj.bias', F'decoder.layers.{i}.self_attn.out_proj.bias') ) rename_keys.append( ( F'transformer.decoder.layers.{i}.cross_attn.out_proj.weight', F'decoder.layers.{i}.encoder_attn.out_proj.weight', ) ) rename_keys.append( ( F'transformer.decoder.layers.{i}.cross_attn.out_proj.bias', F'decoder.layers.{i}.encoder_attn.out_proj.bias', ) ) rename_keys.append((F'transformer.decoder.layers.{i}.linear1.weight', F'decoder.layers.{i}.fc1.weight')) rename_keys.append((F'transformer.decoder.layers.{i}.linear1.bias', F'decoder.layers.{i}.fc1.bias')) rename_keys.append((F'transformer.decoder.layers.{i}.linear2.weight', F'decoder.layers.{i}.fc2.weight')) rename_keys.append((F'transformer.decoder.layers.{i}.linear2.bias', F'decoder.layers.{i}.fc2.bias')) rename_keys.append( (F'transformer.decoder.layers.{i}.norm1.weight', F'decoder.layers.{i}.self_attn_layer_norm.weight') ) rename_keys.append((F'transformer.decoder.layers.{i}.norm1.bias', F'decoder.layers.{i}.self_attn_layer_norm.bias')) rename_keys.append( (F'transformer.decoder.layers.{i}.norm2.weight', F'decoder.layers.{i}.encoder_attn_layer_norm.weight') ) rename_keys.append( (F'transformer.decoder.layers.{i}.norm2.bias', F'decoder.layers.{i}.encoder_attn_layer_norm.bias') ) rename_keys.append((F'transformer.decoder.layers.{i}.norm3.weight', F'decoder.layers.{i}.final_layer_norm.weight')) rename_keys.append((F'transformer.decoder.layers.{i}.norm3.bias', F'decoder.layers.{i}.final_layer_norm.bias')) # q, k, v projections in self/cross-attention in decoder for conditional DETR rename_keys.append( (F'transformer.decoder.layers.{i}.sa_qcontent_proj.weight', F'decoder.layers.{i}.sa_qcontent_proj.weight') ) rename_keys.append( (F'transformer.decoder.layers.{i}.sa_kcontent_proj.weight', F'decoder.layers.{i}.sa_kcontent_proj.weight') ) rename_keys.append( (F'transformer.decoder.layers.{i}.sa_qpos_proj.weight', F'decoder.layers.{i}.sa_qpos_proj.weight') ) rename_keys.append( (F'transformer.decoder.layers.{i}.sa_kpos_proj.weight', F'decoder.layers.{i}.sa_kpos_proj.weight') ) rename_keys.append((F'transformer.decoder.layers.{i}.sa_v_proj.weight', F'decoder.layers.{i}.sa_v_proj.weight')) rename_keys.append( (F'transformer.decoder.layers.{i}.ca_qcontent_proj.weight', F'decoder.layers.{i}.ca_qcontent_proj.weight') ) # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight")) rename_keys.append( (F'transformer.decoder.layers.{i}.ca_kcontent_proj.weight', F'decoder.layers.{i}.ca_kcontent_proj.weight') ) rename_keys.append( (F'transformer.decoder.layers.{i}.ca_kpos_proj.weight', F'decoder.layers.{i}.ca_kpos_proj.weight') ) rename_keys.append((F'transformer.decoder.layers.{i}.ca_v_proj.weight', F'decoder.layers.{i}.ca_v_proj.weight')) rename_keys.append( (F'transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight', F'decoder.layers.{i}.ca_qpos_sine_proj.weight') ) rename_keys.append( (F'transformer.decoder.layers.{i}.sa_qcontent_proj.bias', F'decoder.layers.{i}.sa_qcontent_proj.bias') ) rename_keys.append( (F'transformer.decoder.layers.{i}.sa_kcontent_proj.bias', F'decoder.layers.{i}.sa_kcontent_proj.bias') ) rename_keys.append((F'transformer.decoder.layers.{i}.sa_qpos_proj.bias', F'decoder.layers.{i}.sa_qpos_proj.bias')) rename_keys.append((F'transformer.decoder.layers.{i}.sa_kpos_proj.bias', F'decoder.layers.{i}.sa_kpos_proj.bias')) rename_keys.append((F'transformer.decoder.layers.{i}.sa_v_proj.bias', F'decoder.layers.{i}.sa_v_proj.bias')) rename_keys.append( (F'transformer.decoder.layers.{i}.ca_qcontent_proj.bias', F'decoder.layers.{i}.ca_qcontent_proj.bias') ) # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias")) rename_keys.append( (F'transformer.decoder.layers.{i}.ca_kcontent_proj.bias', F'decoder.layers.{i}.ca_kcontent_proj.bias') ) rename_keys.append((F'transformer.decoder.layers.{i}.ca_kpos_proj.bias', F'decoder.layers.{i}.ca_kpos_proj.bias')) rename_keys.append((F'transformer.decoder.layers.{i}.ca_v_proj.bias', F'decoder.layers.{i}.ca_v_proj.bias')) rename_keys.append( (F'transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias', F'decoder.layers.{i}.ca_qpos_sine_proj.bias') ) # convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads # for conditional DETR, also convert reference point head and query scale MLP rename_keys.extend( [ ('''input_proj.weight''', '''input_projection.weight'''), ('''input_proj.bias''', '''input_projection.bias'''), ('''query_embed.weight''', '''query_position_embeddings.weight'''), ('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''), ('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''), ('''class_embed.weight''', '''class_labels_classifier.weight'''), ('''class_embed.bias''', '''class_labels_classifier.bias'''), ('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''), ('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''), ('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''), ('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''), ('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''), ('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''), ('''transformer.decoder.ref_point_head.layers.0.weight''', '''decoder.ref_point_head.layers.0.weight'''), ('''transformer.decoder.ref_point_head.layers.0.bias''', '''decoder.ref_point_head.layers.0.bias'''), ('''transformer.decoder.ref_point_head.layers.1.weight''', '''decoder.ref_point_head.layers.1.weight'''), ('''transformer.decoder.ref_point_head.layers.1.bias''', '''decoder.ref_point_head.layers.1.bias'''), ('''transformer.decoder.query_scale.layers.0.weight''', '''decoder.query_scale.layers.0.weight'''), ('''transformer.decoder.query_scale.layers.0.bias''', '''decoder.query_scale.layers.0.bias'''), ('''transformer.decoder.query_scale.layers.1.weight''', '''decoder.query_scale.layers.1.weight'''), ('''transformer.decoder.query_scale.layers.1.bias''', '''decoder.query_scale.layers.1.bias'''), ('''transformer.decoder.layers.0.ca_qpos_proj.weight''', '''decoder.layers.0.ca_qpos_proj.weight'''), ('''transformer.decoder.layers.0.ca_qpos_proj.bias''', '''decoder.layers.0.ca_qpos_proj.bias'''), ] ) def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> List[str]: lowerCamelCase__ : List[Any] = state_dict.pop(UpperCamelCase ) lowerCamelCase__ : Any = val def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> Dict: lowerCamelCase__ : Tuple = OrderedDict() for key, value in state_dict.items(): if "backbone.0.body" in key: lowerCamelCase__ : List[str] = key.replace("""backbone.0.body""" , """backbone.conv_encoder.model""" ) lowerCamelCase__ : Optional[int] = value else: lowerCamelCase__ : Any = value return new_state_dict def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase=False ) -> Dict: lowerCamelCase__ : Optional[int] = """""" if is_panoptic: lowerCamelCase__ : Dict = """conditional_detr.""" # first: transformer encoder for i in range(6 ): # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias) lowerCamelCase__ : List[Any] = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' ) lowerCamelCase__ : Union[str, Any] = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' ) # next, add query, keys and values (in that order) to the state dict lowerCamelCase__ : int = in_proj_weight[:256, :] lowerCamelCase__ : Any = in_proj_bias[:256] lowerCamelCase__ : str = in_proj_weight[256:512, :] lowerCamelCase__ : Optional[int] = in_proj_bias[256:512] lowerCamelCase__ : Dict = in_proj_weight[-256:, :] lowerCamelCase__ : str = in_proj_bias[-256:] def SCREAMING_SNAKE_CASE_ () -> List[Any]: lowerCamelCase__ : Optional[int] = """http://images.cocodataset.org/val2017/000000039769.jpg""" lowerCamelCase__ : Optional[Any] = Image.open(requests.get(UpperCamelCase , stream=UpperCamelCase ).raw ) return im @torch.no_grad() def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase ) -> int: lowerCamelCase__ : Optional[Any] = ConditionalDetrConfig() # set backbone and dilation attributes if "resnet101" in model_name: lowerCamelCase__ : Any = """resnet101""" if "dc5" in model_name: lowerCamelCase__ : Optional[int] = True lowerCamelCase__ : int = """panoptic""" in model_name if is_panoptic: lowerCamelCase__ : List[str] = 250 else: lowerCamelCase__ : int = 91 lowerCamelCase__ : int = """huggingface/label-files""" lowerCamelCase__ : List[str] = """coco-detection-id2label.json""" lowerCamelCase__ : Optional[Any] = json.load(open(hf_hub_download(UpperCamelCase , UpperCamelCase , repo_type="""dataset""" ) , """r""" ) ) lowerCamelCase__ : Any = {int(UpperCamelCase ): v for k, v in idalabel.items()} lowerCamelCase__ : str = idalabel lowerCamelCase__ : List[str] = {v: k for k, v in idalabel.items()} # load image processor lowerCamelCase__ : Optional[int] = """coco_panoptic""" if is_panoptic else """coco_detection""" lowerCamelCase__ : int = ConditionalDetrImageProcessor(format=UpperCamelCase ) # prepare image lowerCamelCase__ : List[str] = prepare_img() lowerCamelCase__ : int = image_processor(images=UpperCamelCase , return_tensors="""pt""" ) lowerCamelCase__ : Optional[Any] = encoding["""pixel_values"""] logger.info(f'''Converting model {model_name}...''' ) # load original model from torch hub lowerCamelCase__ : List[Any] = torch.hub.load("""DeppMeng/ConditionalDETR""" , UpperCamelCase , pretrained=UpperCamelCase ).eval() lowerCamelCase__ : Dict = conditional_detr.state_dict() # rename keys for src, dest in rename_keys: if is_panoptic: lowerCamelCase__ : Optional[Any] = """conditional_detr.""" + src rename_key(UpperCamelCase , UpperCamelCase , UpperCamelCase ) lowerCamelCase__ : Dict = rename_backbone_keys(UpperCamelCase ) # query, key and value matrices need special treatment read_in_q_k_v(UpperCamelCase , is_panoptic=UpperCamelCase ) # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them lowerCamelCase__ : Dict = """conditional_detr.model.""" if is_panoptic else """model.""" for key in state_dict.copy().keys(): if is_panoptic: if ( key.startswith("""conditional_detr""" ) and not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ) ): lowerCamelCase__ : int = state_dict.pop(UpperCamelCase ) lowerCamelCase__ : Union[str, Any] = val elif "class_labels_classifier" in key or "bbox_predictor" in key: lowerCamelCase__ : List[str] = state_dict.pop(UpperCamelCase ) lowerCamelCase__ : Any = val elif key.startswith("""bbox_attention""" ) or key.startswith("""mask_head""" ): continue else: lowerCamelCase__ : int = state_dict.pop(UpperCamelCase ) lowerCamelCase__ : Tuple = val else: if not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ): lowerCamelCase__ : Union[str, Any] = state_dict.pop(UpperCamelCase ) lowerCamelCase__ : Dict = val # finally, create HuggingFace model and load state dict lowerCamelCase__ : Tuple = ConditionalDetrForSegmentation(UpperCamelCase ) if is_panoptic else ConditionalDetrForObjectDetection(UpperCamelCase ) model.load_state_dict(UpperCamelCase ) model.eval() model.push_to_hub(repo_id=UpperCamelCase , organization="""DepuMeng""" , commit_message="""Add model""" ) # verify our conversion lowerCamelCase__ : Optional[Any] = conditional_detr(UpperCamelCase ) lowerCamelCase__ : Optional[Any] = model(UpperCamelCase ) assert torch.allclose(outputs.logits , original_outputs["""pred_logits"""] , atol=1E-4 ) assert torch.allclose(outputs.pred_boxes , original_outputs["""pred_boxes"""] , atol=1E-4 ) if is_panoptic: assert torch.allclose(outputs.pred_masks , original_outputs["""pred_masks"""] , atol=1E-4 ) # Save model and image processor logger.info(f'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' ) Path(UpperCamelCase ).mkdir(exist_ok=UpperCamelCase ) model.save_pretrained(UpperCamelCase ) image_processor.save_pretrained(UpperCamelCase ) if __name__ == "__main__": _A : List[Any] =argparse.ArgumentParser() parser.add_argument( '''--model_name''', default='''conditional_detr_resnet50''', type=str, help='''Name of the CONDITIONAL_DETR model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.''' ) _A : Optional[Any] =parser.parse_args() convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
41
1
"""simple docstring""" import json import os from collections import Counter import torch import torchvision import torchvision.transforms as transforms from PIL import Image from torch import nn from torch.utils.data import Dataset A_ = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)} class __SCREAMING_SNAKE_CASE ( nn.Module ): def __init__( self : str , snake_case : str ): '''simple docstring''' super().__init__() A__ : Optional[Any] = torchvision.models.resnetaaa(pretrained=snake_case ) A__ : int = list(model.children() )[:-2] A__ : Optional[Any] = nn.Sequential(*snake_case ) A__ : Tuple = nn.AdaptiveAvgPoolad(POOLING_BREAKDOWN[args.num_image_embeds] ) def _UpperCamelCase ( self : List[Any] , snake_case : str ): '''simple docstring''' A__ : List[str] = self.pool(self.model(snake_case ) ) A__ : str = torch.flatten(snake_case , start_dim=2 ) A__ : Union[str, Any] = out.transpose(1 , 2 ).contiguous() return out # BxNx2048 class __SCREAMING_SNAKE_CASE ( UpperCamelCase ): def __init__( self : Union[str, Any] , snake_case : str , snake_case : Optional[Any] , snake_case : List[Any] , snake_case : str , snake_case : int ): '''simple docstring''' A__ : Optional[Any] = [json.loads(snake_case ) for l in open(snake_case )] A__ : List[Any] = os.path.dirname(snake_case ) A__ : Any = tokenizer A__ : int = labels A__ : Optional[Any] = len(snake_case ) A__ : Dict = max_seq_length A__ : Union[str, Any] = transforms def __len__( self : Optional[Any] ): '''simple docstring''' return len(self.data ) def __getitem__( self : Union[str, Any] , snake_case : Tuple ): '''simple docstring''' A__ : List[str] = torch.LongTensor(self.tokenizer.encode(self.data[index]["""text"""] , add_special_tokens=snake_case ) ) A__ : Union[str, Any] = sentence[0], sentence[1:-1], sentence[-1] A__ : Tuple = sentence[: self.max_seq_length] A__ : str = torch.zeros(self.n_classes ) A__ : List[str] = 1 A__ : Dict = Image.open(os.path.join(self.data_dir , self.data[index]["""img"""] ) ).convert("""RGB""" ) A__ : Optional[Any] = self.transforms(snake_case ) return { "image_start_token": start_token, "image_end_token": end_token, "sentence": sentence, "image": image, "label": label, } def _UpperCamelCase ( self : Optional[int] ): '''simple docstring''' A__ : int = Counter() for row in self.data: label_freqs.update(row["""label"""] ) return label_freqs def _lowerCAmelCase ( UpperCAmelCase__ : List[Any] ) ->Optional[int]: A__ : Union[str, Any] = [len(row["""sentence"""] ) for row in batch] A__ : Union[str, Any] = len(UpperCAmelCase__ ), max(UpperCAmelCase__ ) A__ : Union[str, Any] = torch.zeros(UpperCAmelCase__, UpperCAmelCase__, dtype=torch.long ) A__ : Union[str, Any] = torch.zeros(UpperCAmelCase__, UpperCAmelCase__, dtype=torch.long ) for i_batch, (input_row, length) in enumerate(zip(UpperCAmelCase__, UpperCAmelCase__ ) ): A__ : Union[str, Any] = input_row["""sentence"""] A__ : Union[str, Any] = 1 A__ : Dict = torch.stack([row["""image"""] for row in batch] ) A__ : Any = torch.stack([row["""label"""] for row in batch] ) A__ : List[str] = torch.stack([row["""image_start_token"""] for row in batch] ) A__ : List[Any] = torch.stack([row["""image_end_token"""] for row in batch] ) return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor def _lowerCAmelCase ( ) ->Union[str, Any]: return [ "Crime", "Drama", "Thriller", "Action", "Comedy", "Romance", "Documentary", "Short", "Mystery", "History", "Family", "Adventure", "Fantasy", "Sci-Fi", "Western", "Horror", "Sport", "War", "Music", "Musical", "Animation", "Biography", "Film-Noir", ] def _lowerCAmelCase ( ) ->Optional[int]: return transforms.Compose( [ transforms.Resize(2_5_6 ), transforms.CenterCrop(2_2_4 ), transforms.ToTensor(), transforms.Normalize( mean=[0.4677_7044, 0.4453_1429, 0.4066_1017], std=[0.1222_1994, 0.1214_5835, 0.1438_0469], ), ] )
365
"""simple docstring""" import unittest from transformers import MraConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_torch_available(): import torch from transformers import ( MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, MraModel, ) from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST class __SCREAMING_SNAKE_CASE : def __init__( self : List[str] , snake_case : Tuple , snake_case : List[str]=2 , snake_case : List[str]=8 , snake_case : List[Any]=True , snake_case : Optional[Any]=True , snake_case : List[Any]=True , snake_case : Dict=True , snake_case : Tuple=99 , snake_case : Dict=16 , snake_case : Dict=5 , snake_case : int=2 , snake_case : Any=36 , snake_case : str="gelu" , snake_case : Dict=0.0 , snake_case : List[Any]=0.0 , snake_case : int=512 , snake_case : List[Any]=16 , snake_case : Tuple=2 , snake_case : Any=0.02 , snake_case : Optional[Any]=3 , snake_case : List[Any]=4 , snake_case : str=None , ): '''simple docstring''' A__ : Union[str, Any] = parent A__ : Optional[Any] = batch_size A__ : Dict = seq_length A__ : str = is_training A__ : Tuple = use_input_mask A__ : Dict = use_token_type_ids A__ : Dict = use_labels A__ : int = vocab_size A__ : List[str] = hidden_size A__ : Union[str, Any] = num_hidden_layers A__ : int = num_attention_heads A__ : List[str] = intermediate_size A__ : int = hidden_act A__ : str = hidden_dropout_prob A__ : Tuple = attention_probs_dropout_prob A__ : Any = max_position_embeddings A__ : Optional[int] = type_vocab_size A__ : int = type_sequence_label_size A__ : Optional[Any] = initializer_range A__ : int = num_labels A__ : Optional[int] = num_choices A__ : Optional[int] = scope def _UpperCamelCase ( self : Optional[int] ): '''simple docstring''' A__ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A__ : Any = None if self.use_input_mask: A__ : Any = random_attention_mask([self.batch_size, self.seq_length] ) A__ : Optional[int] = None if self.use_token_type_ids: A__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) A__ : Dict = None A__ : List[str] = None A__ : Union[str, Any] = None if self.use_labels: A__ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A__ : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) A__ : Any = ids_tensor([self.batch_size] , self.num_choices ) A__ : Optional[int] = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def _UpperCamelCase ( self : List[str] ): '''simple docstring''' return MraConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case , initializer_range=self.initializer_range , ) def _UpperCamelCase ( self : Tuple ): '''simple docstring''' A__ : Any = self.get_config() A__ : List[str] = 300 return config def _UpperCamelCase ( self : Union[str, Any] ): '''simple docstring''' ( ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ) : Tuple = self.prepare_config_and_inputs() A__ : List[str] = True A__ : List[str] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) A__ : int = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def _UpperCamelCase ( self : Any , snake_case : Any , snake_case : Tuple , snake_case : Any , snake_case : Optional[int] , snake_case : Optional[int] , snake_case : Optional[int] , snake_case : Dict ): '''simple docstring''' A__ : List[str] = MraModel(config=snake_case ) model.to(snake_case ) model.eval() A__ : Dict = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case ) A__ : List[str] = model(snake_case , token_type_ids=snake_case ) A__ : Union[str, Any] = model(snake_case ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _UpperCamelCase ( self : Optional[Any] , snake_case : List[Any] , snake_case : Any , snake_case : Optional[Any] , snake_case : Union[str, Any] , snake_case : Tuple , snake_case : Dict , snake_case : str , snake_case : Dict , snake_case : str , ): '''simple docstring''' A__ : Dict = True A__ : Optional[Any] = MraModel(snake_case ) model.to(snake_case ) model.eval() A__ : Union[str, Any] = model( snake_case , attention_mask=snake_case , token_type_ids=snake_case , encoder_hidden_states=snake_case , encoder_attention_mask=snake_case , ) A__ : str = model( snake_case , attention_mask=snake_case , token_type_ids=snake_case , encoder_hidden_states=snake_case , ) A__ : Optional[int] = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _UpperCamelCase ( self : int , snake_case : Optional[Any] , snake_case : Optional[int] , snake_case : int , snake_case : str , snake_case : Union[str, Any] , snake_case : Dict , snake_case : List[str] ): '''simple docstring''' A__ : Union[str, Any] = MraForMaskedLM(config=snake_case ) model.to(snake_case ) model.eval() A__ : List[Any] = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _UpperCamelCase ( self : Optional[Any] , snake_case : Dict , snake_case : Dict , snake_case : Dict , snake_case : List[str] , snake_case : List[str] , snake_case : Tuple , snake_case : Union[str, Any] ): '''simple docstring''' A__ : Dict = MraForQuestionAnswering(config=snake_case ) model.to(snake_case ) model.eval() A__ : str = model( snake_case , attention_mask=snake_case , token_type_ids=snake_case , start_positions=snake_case , end_positions=snake_case , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _UpperCamelCase ( self : Tuple , snake_case : List[Any] , snake_case : Optional[Any] , snake_case : Optional[Any] , snake_case : List[str] , snake_case : Optional[int] , snake_case : List[str] , snake_case : Union[str, Any] ): '''simple docstring''' A__ : str = self.num_labels A__ : Optional[Any] = MraForSequenceClassification(snake_case ) model.to(snake_case ) model.eval() A__ : str = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _UpperCamelCase ( self : Union[str, Any] , snake_case : Dict , snake_case : str , snake_case : List[Any] , snake_case : Any , snake_case : Dict , snake_case : Tuple , snake_case : Optional[Any] ): '''simple docstring''' A__ : str = self.num_labels A__ : Union[str, Any] = MraForTokenClassification(config=snake_case ) model.to(snake_case ) model.eval() A__ : str = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _UpperCamelCase ( self : Tuple , snake_case : Optional[Any] , snake_case : Optional[int] , snake_case : int , snake_case : Optional[Any] , snake_case : List[str] , snake_case : Dict , snake_case : Optional[Any] ): '''simple docstring''' A__ : List[str] = self.num_choices A__ : str = MraForMultipleChoice(config=snake_case ) model.to(snake_case ) model.eval() A__ : int = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() A__ : Dict = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() A__ : Tuple = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() A__ : str = model( snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def _UpperCamelCase ( self : List[Any] ): '''simple docstring''' A__ : List[str] = self.prepare_config_and_inputs() ( ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ) : Dict = config_and_inputs A__ : Optional[int] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class __SCREAMING_SNAKE_CASE ( UpperCamelCase , unittest.TestCase ): snake_case_ = ( ( MraModel, MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, ) if is_torch_available() else () ) snake_case_ = False snake_case_ = False snake_case_ = False snake_case_ = False snake_case_ = () def _UpperCamelCase ( self : int ): '''simple docstring''' A__ : Optional[Any] = MraModelTester(self ) A__ : List[str] = ConfigTester(self , config_class=snake_case , hidden_size=37 ) def _UpperCamelCase ( self : Tuple ): '''simple docstring''' self.config_tester.run_common_tests() def _UpperCamelCase ( self : Tuple ): '''simple docstring''' A__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case ) def _UpperCamelCase ( self : Tuple ): '''simple docstring''' A__ : Optional[int] = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: A__ : List[str] = type self.model_tester.create_and_check_model(*snake_case ) def _UpperCamelCase ( self : Union[str, Any] ): '''simple docstring''' A__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*snake_case ) def _UpperCamelCase ( self : Union[str, Any] ): '''simple docstring''' A__ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*snake_case ) def _UpperCamelCase ( self : Optional[int] ): '''simple docstring''' A__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*snake_case ) def _UpperCamelCase ( self : int ): '''simple docstring''' A__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*snake_case ) def _UpperCamelCase ( self : Union[str, Any] ): '''simple docstring''' A__ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*snake_case ) @slow def _UpperCamelCase ( self : Any ): '''simple docstring''' for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A__ : str = MraModel.from_pretrained(snake_case ) self.assertIsNotNone(snake_case ) @unittest.skip(reason="""MRA does not output attentions""" ) def _UpperCamelCase ( self : Tuple ): '''simple docstring''' return @require_torch class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): @slow def _UpperCamelCase ( self : Union[str, Any] ): '''simple docstring''' A__ : str = MraModel.from_pretrained("""uw-madison/mra-base-512-4""" ) A__ : Any = torch.arange(256 ).unsqueeze(0 ) with torch.no_grad(): A__ : List[Any] = model(snake_case )[0] A__ : List[Any] = torch.Size((1, 256, 768) ) self.assertEqual(output.shape , snake_case ) A__ : int = torch.tensor( [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case , atol=1e-4 ) ) @slow def _UpperCamelCase ( self : List[Any] ): '''simple docstring''' A__ : Union[str, Any] = MraForMaskedLM.from_pretrained("""uw-madison/mra-base-512-4""" ) A__ : Tuple = torch.arange(256 ).unsqueeze(0 ) with torch.no_grad(): A__ : List[Any] = model(snake_case )[0] A__ : Dict = 5_0265 A__ : List[str] = torch.Size((1, 256, vocab_size) ) self.assertEqual(output.shape , snake_case ) A__ : List[Any] = torch.tensor( [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case , atol=1e-4 ) ) @slow def _UpperCamelCase ( self : Dict ): '''simple docstring''' A__ : Any = MraForMaskedLM.from_pretrained("""uw-madison/mra-base-4096-8-d3""" ) A__ : List[Any] = torch.arange(4096 ).unsqueeze(0 ) with torch.no_grad(): A__ : List[Any] = model(snake_case )[0] A__ : Union[str, Any] = 5_0265 A__ : Optional[Any] = torch.Size((1, 4096, vocab_size) ) self.assertEqual(output.shape , snake_case ) A__ : Optional[int] = torch.tensor( [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case , atol=1e-4 ) )
296
0
'''simple docstring''' import tempfile import torch from diffusers import IPNDMScheduler from .test_schedulers import SchedulerCommonTest class _UpperCamelCase ( A ): '''simple docstring''' lowerCAmelCase__ = (IPNDMScheduler,) lowerCAmelCase__ = (("""num_inference_steps""", 50),) def __lowerCamelCase ( self : List[str] , **_lowerCAmelCase : int): '''simple docstring''' __lowercase ={'num_train_timesteps': 1_0_0_0} config.update(**_lowerCAmelCase) return config def __lowerCamelCase ( self : str , _lowerCAmelCase : List[Any]=0 , **_lowerCAmelCase : List[Any]): '''simple docstring''' __lowercase =dict(self.forward_default_kwargs) __lowercase =kwargs.pop('num_inference_steps' , _lowerCAmelCase) __lowercase =self.dummy_sample __lowercase =0.1 * sample __lowercase =[residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: __lowercase =self.get_scheduler_config(**_lowerCAmelCase) __lowercase =scheduler_class(**_lowerCAmelCase) scheduler.set_timesteps(_lowerCAmelCase) # copy over dummy past residuals __lowercase =dummy_past_residuals[:] if time_step is None: __lowercase =scheduler.timesteps[len(scheduler.timesteps) // 2] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(_lowerCAmelCase) __lowercase =scheduler_class.from_pretrained(_lowerCAmelCase) new_scheduler.set_timesteps(_lowerCAmelCase) # copy over dummy past residuals __lowercase =dummy_past_residuals[:] __lowercase =scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase).prev_sample __lowercase =new_scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase).prev_sample assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" __lowercase =scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase).prev_sample __lowercase =new_scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase).prev_sample assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" def __lowerCamelCase ( self : List[Any]): '''simple docstring''' pass def __lowerCamelCase ( self : Any , _lowerCAmelCase : str=0 , **_lowerCAmelCase : int): '''simple docstring''' __lowercase =dict(self.forward_default_kwargs) __lowercase =kwargs.pop('num_inference_steps' , _lowerCAmelCase) __lowercase =self.dummy_sample __lowercase =0.1 * sample __lowercase =[residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: __lowercase =self.get_scheduler_config() __lowercase =scheduler_class(**_lowerCAmelCase) scheduler.set_timesteps(_lowerCAmelCase) # copy over dummy past residuals (must be after setting timesteps) __lowercase =dummy_past_residuals[:] if time_step is None: __lowercase =scheduler.timesteps[len(scheduler.timesteps) // 2] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(_lowerCAmelCase) __lowercase =scheduler_class.from_pretrained(_lowerCAmelCase) # copy over dummy past residuals new_scheduler.set_timesteps(_lowerCAmelCase) # copy over dummy past residual (must be after setting timesteps) __lowercase =dummy_past_residuals[:] __lowercase =scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase).prev_sample __lowercase =new_scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase).prev_sample assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" __lowercase =scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase).prev_sample __lowercase =new_scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase).prev_sample assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" def __lowerCamelCase ( self : Optional[Any] , **_lowerCAmelCase : Dict): '''simple docstring''' __lowercase =self.scheduler_classes[0] __lowercase =self.get_scheduler_config(**_lowerCAmelCase) __lowercase =scheduler_class(**_lowerCAmelCase) __lowercase =1_0 __lowercase =self.dummy_model() __lowercase =self.dummy_sample_deter scheduler.set_timesteps(_lowerCAmelCase) for i, t in enumerate(scheduler.timesteps): __lowercase =model(_lowerCAmelCase , _lowerCAmelCase) __lowercase =scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase).prev_sample for i, t in enumerate(scheduler.timesteps): __lowercase =model(_lowerCAmelCase , _lowerCAmelCase) __lowercase =scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase).prev_sample return sample def __lowerCamelCase ( self : Any): '''simple docstring''' __lowercase =dict(self.forward_default_kwargs) __lowercase =kwargs.pop('num_inference_steps' , _lowerCAmelCase) for scheduler_class in self.scheduler_classes: __lowercase =self.get_scheduler_config() __lowercase =scheduler_class(**_lowerCAmelCase) __lowercase =self.dummy_sample __lowercase =0.1 * sample if num_inference_steps is not None and hasattr(_lowerCAmelCase , 'set_timesteps'): scheduler.set_timesteps(_lowerCAmelCase) elif num_inference_steps is not None and not hasattr(_lowerCAmelCase , 'set_timesteps'): __lowercase =num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) __lowercase =[residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] __lowercase =dummy_past_residuals[:] __lowercase =scheduler.timesteps[5] __lowercase =scheduler.timesteps[6] __lowercase =scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase).prev_sample __lowercase =scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase).prev_sample self.assertEqual(output_a.shape , sample.shape) self.assertEqual(output_a.shape , output_a.shape) __lowercase =scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase).prev_sample __lowercase =scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase).prev_sample self.assertEqual(output_a.shape , sample.shape) self.assertEqual(output_a.shape , output_a.shape) def __lowerCamelCase ( self : List[str]): '''simple docstring''' for timesteps in [1_0_0, 1_0_0_0]: self.check_over_configs(num_train_timesteps=_lowerCAmelCase , time_step=_lowerCAmelCase) def __lowerCamelCase ( self : List[str]): '''simple docstring''' for t, num_inference_steps in zip([1, 5, 1_0] , [1_0, 5_0, 1_0_0]): self.check_over_forward(num_inference_steps=_lowerCAmelCase , time_step=_lowerCAmelCase) def __lowerCamelCase ( self : str): '''simple docstring''' __lowercase =self.full_loop() __lowercase =torch.mean(torch.abs(_lowerCAmelCase)) assert abs(result_mean.item() - 2_5_4_0_5_2_9) < 1_0
166
'''simple docstring''' import numpy as np from cva import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filteraD, imread, imshow, waitKey def _A ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): """simple docstring""" if (ksize % 2) == 0: __lowercase =ksize + 1 __lowercase =np.zeros((ksize, ksize) , dtype=np.floataa ) # each value for y in range(_lowerCAmelCase ): for x in range(_lowerCAmelCase ): # distance from center __lowercase =x - ksize // 2 __lowercase =y - ksize // 2 # degree to radiant __lowercase =theta / 180 * np.pi __lowercase =np.cos(_theta ) __lowercase =np.sin(_theta ) # get kernel x __lowercase =cos_theta * px + sin_theta * py # get kernel y __lowercase =-sin_theta * px + cos_theta * py # fill kernel __lowercase =np.exp( -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2) ) * np.cos(2 * np.pi * _x / lambd + psi ) return gabor if __name__ == "__main__": import doctest doctest.testmod() # read original image lowerCamelCase = imread("""../image_data/lena.jpg""") # turn image in gray scale value lowerCamelCase = cvtColor(img, COLOR_BGR2GRAY) # Apply multiple Kernel to detect edges lowerCamelCase = np.zeros(gray.shape[:2]) for theta in [0, 30, 60, 90, 120, 150]: lowerCamelCase = gabor_filter_kernel(10, 8, theta, 10, 0, 0) out += filteraD(gray, CV_8UC3, kernel_aa) lowerCamelCase = out / out.max() * 255 lowerCamelCase = out.astype(np.uinta) imshow("""Original""", gray) imshow("""Gabor filter with 20x20 mask and 6 directions""", out) waitKey(0)
166
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, is_vision_available, ) a = { '''configuration_owlvit''': [ '''OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''OwlViTConfig''', '''OwlViTOnnxConfig''', '''OwlViTTextConfig''', '''OwlViTVisionConfig''', ], '''processing_owlvit''': ['''OwlViTProcessor'''], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a = ['''OwlViTFeatureExtractor'''] a = ['''OwlViTImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a = [ '''OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''OwlViTModel''', '''OwlViTPreTrainedModel''', '''OwlViTTextModel''', '''OwlViTVisionModel''', '''OwlViTForObjectDetection''', ] if TYPE_CHECKING: from .configuration_owlvit import ( OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, OwlViTConfig, OwlViTOnnxConfig, OwlViTTextConfig, OwlViTVisionConfig, ) from .processing_owlvit import OwlViTProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_owlvit import OwlViTFeatureExtractor from .image_processing_owlvit import OwlViTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_owlvit import ( OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST, OwlViTForObjectDetection, OwlViTModel, OwlViTPreTrainedModel, OwlViTTextModel, OwlViTVisionModel, ) else: import sys a = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
358
"""simple docstring""" from dataclasses import dataclass from typing import Dict, Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, apply_forward_hook from .attention_processor import AttentionProcessor, AttnProcessor from .modeling_utils import ModelMixin from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder @dataclass class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' UpperCAmelCase : "DiagonalGaussianDistribution" class lowercase_ ( __lowerCAmelCase , __lowerCAmelCase ): '''simple docstring''' UpperCAmelCase : List[Any] = True @register_to_config def __init__( self : List[str] , _UpperCAmelCase : int = 3 , _UpperCAmelCase : int = 3 , _UpperCAmelCase : Tuple[str] = ("DownEncoderBlock2D",) , _UpperCAmelCase : Tuple[str] = ("UpDecoderBlock2D",) , _UpperCAmelCase : Tuple[int] = (64,) , _UpperCAmelCase : int = 1 , _UpperCAmelCase : str = "silu" , _UpperCAmelCase : int = 4 , _UpperCAmelCase : int = 32 , _UpperCAmelCase : int = 32 , _UpperCAmelCase : float = 0.1_8215 , ): super().__init__() # pass init params to Encoder _A = Encoder( in_channels=_UpperCAmelCase , out_channels=_UpperCAmelCase , down_block_types=_UpperCAmelCase , block_out_channels=_UpperCAmelCase , layers_per_block=_UpperCAmelCase , act_fn=_UpperCAmelCase , norm_num_groups=_UpperCAmelCase , double_z=_UpperCAmelCase , ) # pass init params to Decoder _A = Decoder( in_channels=_UpperCAmelCase , out_channels=_UpperCAmelCase , up_block_types=_UpperCAmelCase , block_out_channels=_UpperCAmelCase , layers_per_block=_UpperCAmelCase , norm_num_groups=_UpperCAmelCase , act_fn=_UpperCAmelCase , ) _A = nn.Convad(2 * latent_channels , 2 * latent_channels , 1 ) _A = nn.Convad(_UpperCAmelCase , _UpperCAmelCase , 1 ) _A = False _A = False # only relevant if vae tiling is enabled _A = self.config.sample_size _A = ( self.config.sample_size[0] if isinstance(self.config.sample_size , (list, tuple) ) else self.config.sample_size ) _A = int(sample_size / (2 ** (len(self.config.block_out_channels ) - 1)) ) _A = 0.25 def lowerCAmelCase_ ( self : Union[str, Any] , _UpperCAmelCase : int , _UpperCAmelCase : Tuple=False ): if isinstance(_UpperCAmelCase , (Encoder, Decoder) ): _A = value def lowerCAmelCase_ ( self : List[str] , _UpperCAmelCase : bool = True ): _A = use_tiling def lowerCAmelCase_ ( self : Union[str, Any] ): self.enable_tiling(_UpperCAmelCase ) def lowerCAmelCase_ ( self : Dict ): _A = True def lowerCAmelCase_ ( self : str ): _A = False @property # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors def lowerCAmelCase_ ( self : str ): _A = {} def fn_recursive_add_processors(_UpperCAmelCase : str , _UpperCAmelCase : torch.nn.Module , _UpperCAmelCase : Dict[str, AttentionProcessor] ): if hasattr(_UpperCAmelCase , 'set_processor' ): _A = module.processor for sub_name, child in module.named_children(): fn_recursive_add_processors(F'''{name}.{sub_name}''' , _UpperCAmelCase , _UpperCAmelCase ) return processors for name, module in self.named_children(): fn_recursive_add_processors(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) return processors def lowerCAmelCase_ ( self : str , _UpperCAmelCase : Union[AttentionProcessor, Dict[str, AttentionProcessor]] ): _A = len(self.attn_processors.keys() ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) and len(_UpperCAmelCase ) != count: raise ValueError( F'''A dict of processors was passed, but the number of processors {len(_UpperCAmelCase )} does not match the''' F''' number of attention layers: {count}. Please make sure to pass {count} processor classes.''' ) def fn_recursive_attn_processor(_UpperCAmelCase : str , _UpperCAmelCase : torch.nn.Module , _UpperCAmelCase : int ): if hasattr(_UpperCAmelCase , 'set_processor' ): if not isinstance(_UpperCAmelCase , _UpperCAmelCase ): module.set_processor(_UpperCAmelCase ) else: module.set_processor(processor.pop(F'''{name}.processor''' ) ) for sub_name, child in module.named_children(): fn_recursive_attn_processor(F'''{name}.{sub_name}''' , _UpperCAmelCase , _UpperCAmelCase ) for name, module in self.named_children(): fn_recursive_attn_processor(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) def lowerCAmelCase_ ( self : List[str] ): self.set_attn_processor(AttnProcessor() ) @apply_forward_hook def lowerCAmelCase_ ( self : int , _UpperCAmelCase : torch.FloatTensor , _UpperCAmelCase : bool = True ): if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size): return self.tiled_encode(_UpperCAmelCase , return_dict=_UpperCAmelCase ) if self.use_slicing and x.shape[0] > 1: _A = [self.encoder(_UpperCAmelCase ) for x_slice in x.split(1 )] _A = torch.cat(_UpperCAmelCase ) else: _A = self.encoder(_UpperCAmelCase ) _A = self.quant_conv(_UpperCAmelCase ) _A = DiagonalGaussianDistribution(_UpperCAmelCase ) if not return_dict: return (posterior,) return AutoencoderKLOutput(latent_dist=_UpperCAmelCase ) def lowerCAmelCase_ ( self : Any , _UpperCAmelCase : torch.FloatTensor , _UpperCAmelCase : bool = True ): if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size): return self.tiled_decode(_UpperCAmelCase , return_dict=_UpperCAmelCase ) _A = self.post_quant_conv(_UpperCAmelCase ) _A = self.decoder(_UpperCAmelCase ) if not return_dict: return (dec,) return DecoderOutput(sample=_UpperCAmelCase ) @apply_forward_hook def lowerCAmelCase_ ( self : str , _UpperCAmelCase : torch.FloatTensor , _UpperCAmelCase : bool = True ): if self.use_slicing and z.shape[0] > 1: _A = [self._decode(_UpperCAmelCase ).sample for z_slice in z.split(1 )] _A = torch.cat(_UpperCAmelCase ) else: _A = self._decode(_UpperCAmelCase ).sample if not return_dict: return (decoded,) return DecoderOutput(sample=_UpperCAmelCase ) def lowerCAmelCase_ ( self : str , _UpperCAmelCase : int , _UpperCAmelCase : Dict , _UpperCAmelCase : Any ): _A = min(a.shape[2] , b.shape[2] , _UpperCAmelCase ) for y in range(_UpperCAmelCase ): _A = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent) return b def lowerCAmelCase_ ( self : Optional[Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[Any] ): _A = min(a.shape[3] , b.shape[3] , _UpperCAmelCase ) for x in range(_UpperCAmelCase ): _A = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent) return b def lowerCAmelCase_ ( self : str , _UpperCAmelCase : torch.FloatTensor , _UpperCAmelCase : bool = True ): _A = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) ) _A = int(self.tile_latent_min_size * self.tile_overlap_factor ) _A = self.tile_latent_min_size - blend_extent # Split the image into 512x512 tiles and encode them separately. _A = [] for i in range(0 , x.shape[2] , _UpperCAmelCase ): _A = [] for j in range(0 , x.shape[3] , _UpperCAmelCase ): _A = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size] _A = self.encoder(_UpperCAmelCase ) _A = self.quant_conv(_UpperCAmelCase ) row.append(_UpperCAmelCase ) rows.append(_UpperCAmelCase ) _A = [] for i, row in enumerate(_UpperCAmelCase ): _A = [] for j, tile in enumerate(_UpperCAmelCase ): # blend the above tile and the left tile # to the current tile and add the current tile to the result row if i > 0: _A = self.blend_v(rows[i - 1][j] , _UpperCAmelCase , _UpperCAmelCase ) if j > 0: _A = self.blend_h(row[j - 1] , _UpperCAmelCase , _UpperCAmelCase ) result_row.append(tile[:, :, :row_limit, :row_limit] ) result_rows.append(torch.cat(_UpperCAmelCase , dim=3 ) ) _A = torch.cat(_UpperCAmelCase , dim=2 ) _A = DiagonalGaussianDistribution(_UpperCAmelCase ) if not return_dict: return (posterior,) return AutoencoderKLOutput(latent_dist=_UpperCAmelCase ) def lowerCAmelCase_ ( self : Tuple , _UpperCAmelCase : torch.FloatTensor , _UpperCAmelCase : bool = True ): _A = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) ) _A = int(self.tile_sample_min_size * self.tile_overlap_factor ) _A = self.tile_sample_min_size - blend_extent # Split z into overlapping 64x64 tiles and decode them separately. # The tiles have an overlap to avoid seams between tiles. _A = [] for i in range(0 , z.shape[2] , _UpperCAmelCase ): _A = [] for j in range(0 , z.shape[3] , _UpperCAmelCase ): _A = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size] _A = self.post_quant_conv(_UpperCAmelCase ) _A = self.decoder(_UpperCAmelCase ) row.append(_UpperCAmelCase ) rows.append(_UpperCAmelCase ) _A = [] for i, row in enumerate(_UpperCAmelCase ): _A = [] for j, tile in enumerate(_UpperCAmelCase ): # blend the above tile and the left tile # to the current tile and add the current tile to the result row if i > 0: _A = self.blend_v(rows[i - 1][j] , _UpperCAmelCase , _UpperCAmelCase ) if j > 0: _A = self.blend_h(row[j - 1] , _UpperCAmelCase , _UpperCAmelCase ) result_row.append(tile[:, :, :row_limit, :row_limit] ) result_rows.append(torch.cat(_UpperCAmelCase , dim=3 ) ) _A = torch.cat(_UpperCAmelCase , dim=2 ) if not return_dict: return (dec,) return DecoderOutput(sample=_UpperCAmelCase ) def lowerCAmelCase_ ( self : List[str] , _UpperCAmelCase : torch.FloatTensor , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = True , _UpperCAmelCase : Optional[torch.Generator] = None , ): _A = sample _A = self.encode(_UpperCAmelCase ).latent_dist if sample_posterior: _A = posterior.sample(generator=_UpperCAmelCase ) else: _A = posterior.mode() _A = self.decode(_UpperCAmelCase ).sample if not return_dict: return (dec,) return DecoderOutput(sample=_UpperCAmelCase )
271
0
import argparse import logging import pickle import random import time import numpy as np from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO ) __a = logging.getLogger(__name__) def a ( ): '''simple docstring''' lowercase_ = argparse.ArgumentParser( description='''Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids).''' ) parser.add_argument('''--file_path''' , type=_UpperCamelCase , default='''data/dump.txt''' , help='''The path to the data.''' ) parser.add_argument('''--tokenizer_type''' , type=_UpperCamelCase , default='''bert''' , choices=['''bert''', '''roberta''', '''gpt2'''] ) parser.add_argument('''--tokenizer_name''' , type=_UpperCamelCase , default='''bert-base-uncased''' , help='''The tokenizer to use.''' ) parser.add_argument('''--dump_file''' , type=_UpperCamelCase , default='''data/dump''' , help='''The dump file prefix.''' ) lowercase_ = parser.parse_args() logger.info(F'''Loading Tokenizer ({args.tokenizer_name})''' ) if args.tokenizer_type == "bert": lowercase_ = BertTokenizer.from_pretrained(args.tokenizer_name ) lowercase_ = tokenizer.special_tokens_map['''cls_token'''] # `[CLS]` lowercase_ = tokenizer.special_tokens_map['''sep_token'''] # `[SEP]` elif args.tokenizer_type == "roberta": lowercase_ = RobertaTokenizer.from_pretrained(args.tokenizer_name ) lowercase_ = tokenizer.special_tokens_map['''cls_token'''] # `<s>` lowercase_ = tokenizer.special_tokens_map['''sep_token'''] # `</s>` elif args.tokenizer_type == "gpt2": lowercase_ = GPTaTokenizer.from_pretrained(args.tokenizer_name ) lowercase_ = tokenizer.special_tokens_map['''bos_token'''] # `<|endoftext|>` lowercase_ = tokenizer.special_tokens_map['''eos_token'''] # `<|endoftext|>` logger.info(F'''Loading text from {args.file_path}''' ) with open(args.file_path , '''r''' , encoding='''utf8''' ) as fp: lowercase_ = fp.readlines() logger.info('''Start encoding''' ) logger.info(F'''{len(_UpperCamelCase )} examples to process.''' ) lowercase_ = [] lowercase_ = 0 lowercase_ = 10_000 lowercase_ = time.time() for text in data: lowercase_ = F'''{bos} {text.strip()} {sep}''' lowercase_ = tokenizer.encode(_UpperCamelCase , add_special_tokens=_UpperCamelCase ) rslt.append(_UpperCamelCase ) iter += 1 if iter % interval == 0: lowercase_ = time.time() logger.info(F'''{iter} examples processed. - {(end-start):.2f}s/{interval}expl''' ) lowercase_ = time.time() logger.info('''Finished binarization''' ) logger.info(F'''{len(_UpperCamelCase )} examples processed.''' ) lowercase_ = F'''{args.dump_file}.{args.tokenizer_name}.pickle''' lowercase_ = tokenizer.vocab_size if vocab_size < (1 << 16): lowercase_ = [np.uintaa(_UpperCamelCase ) for d in rslt] else: lowercase_ = [np.intaa(_UpperCamelCase ) for d in rslt] random.shuffle(rslt_ ) logger.info(F'''Dump to {dp_file}''' ) with open(_UpperCamelCase , '''wb''' ) as handle: pickle.dump(rslt_ , _UpperCamelCase , protocol=pickle.HIGHEST_PROTOCOL ) if __name__ == "__main__": main()
30
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging a_ = logging.get_logger(__name__) if is_vision_available(): import PIL class __lowerCAmelCase ( lowerCAmelCase__ ): lowerCAmelCase__ = ["""pixel_values"""] def __init__( self , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = PILImageResampling.BICUBIC , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = True , __UpperCAmelCase = 1 / 255 , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = True , **__UpperCAmelCase , ): '''simple docstring''' super().__init__(**__UpperCAmelCase ) __lowerCamelCase = size if size is not None else {'''shortest_edge''': 224} __lowerCamelCase = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase ) __lowerCamelCase = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224} __lowerCamelCase = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase , param_name='''crop_size''' ) __lowerCamelCase = do_resize __lowerCamelCase = size __lowerCamelCase = resample __lowerCamelCase = do_center_crop __lowerCamelCase = crop_size __lowerCamelCase = do_rescale __lowerCamelCase = rescale_factor __lowerCamelCase = do_normalize __lowerCamelCase = image_mean if image_mean is not None else OPENAI_CLIP_MEAN __lowerCamelCase = image_std if image_std is not None else OPENAI_CLIP_STD __lowerCamelCase = do_convert_rgb def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = PILImageResampling.BICUBIC , __UpperCAmelCase = None , **__UpperCAmelCase , ): '''simple docstring''' __lowerCamelCase = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase ) if "shortest_edge" not in size: raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" ) __lowerCamelCase = get_resize_output_image_size(__UpperCAmelCase , size=size['''shortest_edge'''] , default_to_square=__UpperCAmelCase ) return resize(__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ): '''simple docstring''' __lowerCamelCase = get_size_dict(__UpperCAmelCase ) if "height" not in size or "width" not in size: raise ValueError(F"""The `size` parameter must contain the keys (height, width). Got {size.keys()}""" ) return center_crop(__UpperCAmelCase , size=(size['''height'''], size['''width''']) , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ): '''simple docstring''' return rescale(__UpperCAmelCase , scale=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ): '''simple docstring''' return normalize(__UpperCAmelCase , mean=__UpperCAmelCase , std=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = ChannelDimension.FIRST , **__UpperCAmelCase , ): '''simple docstring''' __lowerCamelCase = do_resize if do_resize is not None else self.do_resize __lowerCamelCase = size if size is not None else self.size __lowerCamelCase = get_size_dict(__UpperCAmelCase , param_name='''size''' , default_to_square=__UpperCAmelCase ) __lowerCamelCase = resample if resample is not None else self.resample __lowerCamelCase = do_center_crop if do_center_crop is not None else self.do_center_crop __lowerCamelCase = crop_size if crop_size is not None else self.crop_size __lowerCamelCase = get_size_dict(__UpperCAmelCase , param_name='''crop_size''' , default_to_square=__UpperCAmelCase ) __lowerCamelCase = do_rescale if do_rescale is not None else self.do_rescale __lowerCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor __lowerCamelCase = do_normalize if do_normalize is not None else self.do_normalize __lowerCamelCase = image_mean if image_mean is not None else self.image_mean __lowerCamelCase = image_std if image_std is not None else self.image_std __lowerCamelCase = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb __lowerCamelCase = make_list_of_images(__UpperCAmelCase ) if not valid_images(__UpperCAmelCase ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''' ) if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # PIL RGBA images are converted to RGB if do_convert_rgb: __lowerCamelCase = [convert_to_rgb(__UpperCAmelCase ) for image in images] # All transformations expect numpy arrays. __lowerCamelCase = [to_numpy_array(__UpperCAmelCase ) for image in images] if do_resize: __lowerCamelCase = [self.resize(image=__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase ) for image in images] if do_center_crop: __lowerCamelCase = [self.center_crop(image=__UpperCAmelCase , size=__UpperCAmelCase ) for image in images] if do_rescale: __lowerCamelCase = [self.rescale(image=__UpperCAmelCase , scale=__UpperCAmelCase ) for image in images] if do_normalize: __lowerCamelCase = [self.normalize(image=__UpperCAmelCase , mean=__UpperCAmelCase , std=__UpperCAmelCase ) for image in images] __lowerCamelCase = [to_channel_dimension_format(__UpperCAmelCase , __UpperCAmelCase ) for image in images] __lowerCamelCase = {'''pixel_values''': images} return BatchFeature(data=__UpperCAmelCase , tensor_type=__UpperCAmelCase )
330
0
import gc import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class __magic_name__ ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ): """simple docstring""" __UpperCamelCase = CycleDiffusionPipeline __UpperCamelCase = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - { '''negative_prompt''', '''height''', '''width''', '''negative_prompt_embeds''', } __UpperCamelCase = PipelineTesterMixin.required_optional_params - {'''latents'''} __UpperCamelCase = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''source_prompt'''} ) __UpperCamelCase = IMAGE_TO_IMAGE_IMAGE_PARAMS __UpperCamelCase = IMAGE_TO_IMAGE_IMAGE_PARAMS def SCREAMING_SNAKE_CASE ( self :Union[str, Any] ): '''simple docstring''' torch.manual_seed(0 ) A_ : Union[str, Any] = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , ) A_ : List[str] = DDIMScheduler( beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" , num_train_timesteps=1_000 , clip_sample=snake_case , set_alpha_to_one=snake_case , ) torch.manual_seed(0 ) A_ : Dict = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , ) torch.manual_seed(0 ) A_ : Union[str, Any] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) A_ : str = CLIPTextModel(snake_case ) A_ : Union[str, Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) A_ : List[str] = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, } return components def SCREAMING_SNAKE_CASE ( self :Dict , snake_case :List[Any] , snake_case :Any=0 ): '''simple docstring''' A_ : List[str] = floats_tensor((1, 3, 32, 32) , rng=random.Random(snake_case ) ).to(snake_case ) A_ : Dict = image / 2 + 0.5 if str(snake_case ).startswith("mps" ): A_ : Optional[Any] = torch.manual_seed(snake_case ) else: A_ : Union[str, Any] = torch.Generator(device=snake_case ).manual_seed(snake_case ) A_ : Dict = { "prompt": "An astronaut riding an elephant", "source_prompt": "An astronaut riding a horse", "image": image, "generator": generator, "num_inference_steps": 2, "eta": 0.1, "strength": 0.8, "guidance_scale": 3, "source_guidance_scale": 1, "output_type": "numpy", } return inputs def SCREAMING_SNAKE_CASE ( self :str ): '''simple docstring''' A_ : List[str] = "cpu" # ensure determinism for the device-dependent torch.Generator A_ : Optional[int] = self.get_dummy_components() A_ : List[Any] = CycleDiffusionPipeline(**snake_case ) A_ : List[str] = pipe.to(snake_case ) pipe.set_progress_bar_config(disable=snake_case ) A_ : Optional[int] = self.get_dummy_inputs(snake_case ) A_ : List[Any] = pipe(**snake_case ) A_ : Optional[Any] = output.images A_ : Any = images[0, -3:, -3:, -1] assert images.shape == (1, 32, 32, 3) A_ : List[str] = np.array([0.4459, 0.4943, 0.4544, 0.6643, 0.5474, 0.4327, 0.5701, 0.5959, 0.5179] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 @unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" ) def SCREAMING_SNAKE_CASE ( self :int ): '''simple docstring''' A_ : Optional[int] = self.get_dummy_components() for name, module in components.items(): if hasattr(snake_case , "half" ): A_ : int = module.half() A_ : List[Any] = CycleDiffusionPipeline(**snake_case ) A_ : int = pipe.to(snake_case ) pipe.set_progress_bar_config(disable=snake_case ) A_ : Union[str, Any] = self.get_dummy_inputs(snake_case ) A_ : Optional[Any] = pipe(**snake_case ) A_ : List[Any] = output.images A_ : Optional[int] = images[0, -3:, -3:, -1] assert images.shape == (1, 32, 32, 3) A_ : Any = np.array([0.3506, 0.4543, 0.446, 0.4575, 0.5195, 0.4155, 0.5273, 0.518, 0.4116] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 @skip_mps def SCREAMING_SNAKE_CASE ( self :List[Any] ): '''simple docstring''' return super().test_save_load_local() @unittest.skip("non-deterministic pipeline" ) def SCREAMING_SNAKE_CASE ( self :List[Any] ): '''simple docstring''' return super().test_inference_batch_single_identical() @skip_mps def SCREAMING_SNAKE_CASE ( self :Optional[Any] ): '''simple docstring''' return super().test_dict_tuple_outputs_equivalent() @skip_mps def SCREAMING_SNAKE_CASE ( self :List[str] ): '''simple docstring''' return super().test_save_load_optional_components() @skip_mps def SCREAMING_SNAKE_CASE ( self :int ): '''simple docstring''' return super().test_attention_slicing_forward_pass() @slow @require_torch_gpu class __magic_name__ ( unittest.TestCase ): """simple docstring""" def SCREAMING_SNAKE_CASE ( self :Any ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def SCREAMING_SNAKE_CASE ( self :Optional[int] ): '''simple docstring''' A_ : Any = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/cycle-diffusion/black_colored_car.png" ) A_ : Tuple = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy" ) A_ : List[str] = init_image.resize((512, 512) ) A_ : List[str] = "CompVis/stable-diffusion-v1-4" A_ : str = DDIMScheduler.from_pretrained(snake_case , subfolder="scheduler" ) A_ : List[Any] = CycleDiffusionPipeline.from_pretrained( snake_case , scheduler=snake_case , safety_checker=snake_case , torch_dtype=torch.floataa , revision="fp16" ) pipe.to(snake_case ) pipe.set_progress_bar_config(disable=snake_case ) pipe.enable_attention_slicing() A_ : Dict = "A black colored car" A_ : int = "A blue colored car" A_ : Union[str, Any] = torch.manual_seed(0 ) A_ : Optional[Any] = pipe( prompt=snake_case , source_prompt=snake_case , image=snake_case , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=snake_case , output_type="np" , ) A_ : Tuple = output.images # the values aren't exactly equal, but the images look the same visually assert np.abs(image - expected_image ).max() < 5e-1 def SCREAMING_SNAKE_CASE ( self :Dict ): '''simple docstring''' A_ : str = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/cycle-diffusion/black_colored_car.png" ) A_ : List[Any] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy" ) A_ : Optional[int] = init_image.resize((512, 512) ) A_ : Union[str, Any] = "CompVis/stable-diffusion-v1-4" A_ : List[str] = DDIMScheduler.from_pretrained(snake_case , subfolder="scheduler" ) A_ : Tuple = CycleDiffusionPipeline.from_pretrained(snake_case , scheduler=snake_case , safety_checker=snake_case ) pipe.to(snake_case ) pipe.set_progress_bar_config(disable=snake_case ) pipe.enable_attention_slicing() A_ : Union[str, Any] = "A black colored car" A_ : Optional[Any] = "A blue colored car" A_ : Optional[int] = torch.manual_seed(0 ) A_ : Optional[Any] = pipe( prompt=snake_case , source_prompt=snake_case , image=snake_case , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=snake_case , output_type="np" , ) A_ : Union[str, Any] = output.images assert np.abs(image - expected_image ).max() < 2e-2
70
from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCAmelCase : Optional[Any] = logging.get_logger(__name__) _lowerCAmelCase : Optional[int] = { '''tiiuae/falcon-40b''': '''https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json''', '''tiiuae/falcon-7b''': '''https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json''', } class __magic_name__ ( lowerCamelCase__ ): """simple docstring""" __UpperCamelCase = '''falcon''' __UpperCamelCase = ['''past_key_values'''] def __init__( self :List[Any] , snake_case :Optional[int]=65_024 , snake_case :Tuple=4_544 , snake_case :Dict=32 , snake_case :Union[str, Any]=71 , snake_case :List[Any]=1e-5 , snake_case :Union[str, Any]=0.02 , snake_case :List[Any]=True , snake_case :Union[str, Any]=0.0 , snake_case :int=0.0 , snake_case :Union[str, Any]=None , snake_case :Dict=False , snake_case :int=False , snake_case :Tuple=True , snake_case :str=True , snake_case :List[Any]=False , snake_case :Optional[Any]=11 , snake_case :Tuple=11 , **snake_case :List[Any] , ): '''simple docstring''' A_ : Optional[int] = vocab_size # Backward compatibility with n_embed kwarg A_ : Any = kwargs.pop("n_embed" , snake_case ) A_ : str = hidden_size if n_embed is None else n_embed A_ : List[str] = num_hidden_layers A_ : List[str] = num_attention_heads A_ : List[str] = layer_norm_epsilon A_ : Optional[Any] = initializer_range A_ : Optional[int] = use_cache A_ : str = hidden_dropout A_ : str = attention_dropout A_ : str = bos_token_id A_ : List[str] = eos_token_id A_ : Union[str, Any] = num_attention_heads if num_kv_heads is None else num_kv_heads A_ : int = alibi A_ : str = new_decoder_architecture A_ : Dict = multi_query # Ignored when new_decoder_architecture is True A_ : Any = parallel_attn A_ : Optional[Any] = bias super().__init__(bos_token_id=snake_case , eos_token_id=snake_case , **snake_case ) @property def SCREAMING_SNAKE_CASE ( self :str ): '''simple docstring''' return self.hidden_size // self.num_attention_heads @property def SCREAMING_SNAKE_CASE ( self :Optional[Any] ): '''simple docstring''' return not self.alibi
70
1
'''simple docstring''' import unittest from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin a : str = get_tests_dir("fixtures/spiece.model") @require_sentencepiece @require_tokenizers class UpperCamelCase__ ( lowercase__ , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = DebertaVaTokenizer SCREAMING_SNAKE_CASE__ : Optional[int] = DebertaVaTokenizerFast SCREAMING_SNAKE_CASE__ : str = True SCREAMING_SNAKE_CASE__ : List[Any] = True def A_ ( self ): '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing UpperCAmelCase : Dict = DebertaVaTokenizer(snake_case , unk_token="<unk>" ) tokenizer.save_pretrained(self.tmpdirname ) def A_ ( self , snake_case ): '''simple docstring''' UpperCAmelCase : List[str] = "this is a test" UpperCAmelCase : str = "this is a test" return input_text, output_text def A_ ( self ): '''simple docstring''' UpperCAmelCase : Dict = "<pad>" UpperCAmelCase : str = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case ) , snake_case ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case ) , snake_case ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Dict = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<pad>" ) self.assertEqual(vocab_keys[1] , "<unk>" ) self.assertEqual(vocab_keys[-1] , "[PAD]" ) self.assertEqual(len(snake_case ) , 3_0_0_0_1 ) def A_ ( self ): '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 3_0_0_0_0 ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Union[str, Any] = " \tHeLLo!how \n Are yoU? " UpperCAmelCase : Dict = ["▁hello", "!", "how", "▁are", "▁you", "?"] # fmt: on UpperCAmelCase : Dict = DebertaVaTokenizer(snake_case , do_lower_case=snake_case ) UpperCAmelCase : Tuple = tokenizer.convert_ids_to_tokens(tokenizer.encode(snake_case , add_special_tokens=snake_case ) ) self.assertListEqual(snake_case , snake_case ) UpperCAmelCase : str = DebertaVaTokenizerFast(snake_case , do_lower_case=snake_case ) UpperCAmelCase : Optional[int] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(snake_case , add_special_tokens=snake_case ) ) self.assertListEqual(snake_case , snake_case ) @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one." ) def A_ ( self ): '''simple docstring''' pass @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one." ) def A_ ( self ): '''simple docstring''' pass def A_ ( self ): '''simple docstring''' UpperCAmelCase : List[Any] = "I was born in 92000, and this is falsé." UpperCAmelCase : Dict = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ] # fmt: on UpperCAmelCase : List[str] = DebertaVaTokenizer(snake_case , split_by_punct=snake_case ) UpperCAmelCase : List[str] = tokenizer.convert_ids_to_tokens(tokenizer.encode(snake_case , add_special_tokens=snake_case ) ) self.assertListEqual(snake_case , snake_case ) UpperCAmelCase : List[str] = DebertaVaTokenizerFast(snake_case , split_by_punct=snake_case ) UpperCAmelCase : Tuple = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(snake_case , add_special_tokens=snake_case ) ) self.assertListEqual(snake_case , snake_case ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Optional[int] = "I was born in 92000, and this is falsé." UpperCAmelCase : Optional[Any] = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ] # fmt: on UpperCAmelCase : int = DebertaVaTokenizer(snake_case , do_lower_case=snake_case , split_by_punct=snake_case ) UpperCAmelCase : str = tokenizer.convert_ids_to_tokens(tokenizer.encode(snake_case , add_special_tokens=snake_case ) ) self.assertListEqual(snake_case , snake_case ) UpperCAmelCase : Dict = DebertaVaTokenizerFast(snake_case , do_lower_case=snake_case , split_by_punct=snake_case ) UpperCAmelCase : Union[str, Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(snake_case , add_special_tokens=snake_case ) ) self.assertListEqual(snake_case , snake_case ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Dict = "I was born in 92000, and this is falsé." UpperCAmelCase : Optional[Any] = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ] # fmt: on UpperCAmelCase : int = DebertaVaTokenizer(snake_case , do_lower_case=snake_case , split_by_punct=snake_case ) UpperCAmelCase : List[str] = tokenizer.convert_ids_to_tokens(tokenizer.encode(snake_case , add_special_tokens=snake_case ) ) self.assertListEqual(snake_case , snake_case ) UpperCAmelCase : int = DebertaVaTokenizerFast(snake_case , do_lower_case=snake_case , split_by_punct=snake_case ) UpperCAmelCase : int = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(snake_case , add_special_tokens=snake_case ) ) self.assertListEqual(snake_case , snake_case ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Dict = "I was born in 92000, and this is falsé." UpperCAmelCase : Union[str, Any] = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ] # fmt: on UpperCAmelCase : Optional[Any] = DebertaVaTokenizer(snake_case , do_lower_case=snake_case , split_by_punct=snake_case ) UpperCAmelCase : List[str] = tokenizer.convert_ids_to_tokens(tokenizer.encode(snake_case , add_special_tokens=snake_case ) ) self.assertListEqual(snake_case , snake_case ) UpperCAmelCase : List[str] = DebertaVaTokenizerFast(snake_case , do_lower_case=snake_case , split_by_punct=snake_case ) UpperCAmelCase : Dict = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(snake_case , add_special_tokens=snake_case ) ) self.assertListEqual(snake_case , snake_case ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Union[str, Any] = " \tHeLLo!how \n Are yoU? " UpperCAmelCase : Any = ["▁", "<unk>", "e", "<unk>", "o", "!", "how", "▁", "<unk>", "re", "▁yo", "<unk>", "?"] # fmt: on UpperCAmelCase : Tuple = DebertaVaTokenizer(snake_case , do_lower_case=snake_case , split_by_punct=snake_case ) UpperCAmelCase : Dict = tokenizer.convert_ids_to_tokens(tokenizer.encode(snake_case , add_special_tokens=snake_case ) ) self.assertListEqual(snake_case , snake_case ) UpperCAmelCase : List[str] = DebertaVaTokenizerFast(snake_case , do_lower_case=snake_case , split_by_punct=snake_case ) UpperCAmelCase : int = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(snake_case , add_special_tokens=snake_case ) ) self.assertListEqual(snake_case , snake_case ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : str = self.get_tokenizer() UpperCAmelCase : Union[str, Any] = self.get_rust_tokenizer() UpperCAmelCase : Dict = "I was born in 92000, and this is falsé." UpperCAmelCase : List[str] = tokenizer.convert_ids_to_tokens(tokenizer.encode(snake_case , add_special_tokens=snake_case ) ) UpperCAmelCase : Dict = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(snake_case , add_special_tokens=snake_case ) ) self.assertListEqual(snake_case , snake_case ) UpperCAmelCase : Tuple = tokenizer.encode(snake_case , add_special_tokens=snake_case ) UpperCAmelCase : Union[str, Any] = rust_tokenizer.encode(snake_case , add_special_tokens=snake_case ) self.assertListEqual(snake_case , snake_case ) UpperCAmelCase : int = self.get_rust_tokenizer() UpperCAmelCase : Tuple = tokenizer.encode(snake_case ) UpperCAmelCase : List[Any] = rust_tokenizer.encode(snake_case ) self.assertListEqual(snake_case , snake_case ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : List[Any] = "This is a test" UpperCAmelCase : Tuple = [1_3, 1, 4_3_9_8, 2_5, 2_1, 1_2_8_9] UpperCAmelCase : Any = ["▁", "T", "his", "▁is", "▁a", "▁test"] UpperCAmelCase : Union[str, Any] = ["▁", "<unk>", "his", "▁is", "▁a", "▁test"] UpperCAmelCase : Dict = DebertaVaTokenizer(snake_case , keep_accents=snake_case ) UpperCAmelCase : List[str] = DebertaVaTokenizerFast(snake_case , keep_accents=snake_case ) UpperCAmelCase : str = tokenizer.encode(snake_case , add_special_tokens=snake_case ) self.assertListEqual(snake_case , snake_case ) UpperCAmelCase : Optional[Any] = tokenizer.tokenize(snake_case ) self.assertListEqual(snake_case , snake_case ) UpperCAmelCase : Tuple = tokenizer.convert_ids_to_tokens(snake_case ) self.assertListEqual(snake_case , snake_case ) UpperCAmelCase : Dict = rust_tokenizer.encode(snake_case , add_special_tokens=snake_case ) self.assertListEqual(snake_case , snake_case ) UpperCAmelCase : Optional[int] = rust_tokenizer.tokenize(snake_case ) self.assertListEqual(snake_case , snake_case ) UpperCAmelCase : Dict = rust_tokenizer.convert_ids_to_tokens(snake_case ) self.assertListEqual(snake_case , snake_case ) # fmt: off UpperCAmelCase : Tuple = "I was born in 92000, and this is falsé." UpperCAmelCase : List[str] = [1_3, 1, 2_3, 3_8_6, 1_9, 5_6_1, 3_0_5_0, 1_5, 1_7, 4_8, 2_5, 8_2_5_6, 1_8, 1, 9] UpperCAmelCase : List[Any] = ["▁", "I", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", ".", ] UpperCAmelCase : Union[str, Any] = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ] # fmt: on UpperCAmelCase : List[str] = tokenizer.encode(snake_case , add_special_tokens=snake_case ) self.assertListEqual(snake_case , snake_case ) UpperCAmelCase : int = tokenizer.tokenize(snake_case ) self.assertListEqual(snake_case , snake_case ) UpperCAmelCase : List[str] = tokenizer.convert_ids_to_tokens(snake_case ) self.assertListEqual(snake_case , snake_case ) UpperCAmelCase : Tuple = rust_tokenizer.encode(snake_case , add_special_tokens=snake_case ) self.assertListEqual(snake_case , snake_case ) UpperCAmelCase : List[str] = rust_tokenizer.tokenize(snake_case ) self.assertListEqual(snake_case , snake_case ) UpperCAmelCase : str = rust_tokenizer.convert_ids_to_tokens(snake_case ) self.assertListEqual(snake_case , snake_case ) def A_ ( self ): '''simple docstring''' UpperCAmelCase : Tuple = DebertaVaTokenizer(snake_case ) UpperCAmelCase : str = tokenizer.encode("sequence builders" ) UpperCAmelCase : List[str] = tokenizer.encode("multi-sequence build" ) UpperCAmelCase : Optional[int] = tokenizer.build_inputs_with_special_tokens(snake_case ) UpperCAmelCase : Dict = tokenizer.build_inputs_with_special_tokens(snake_case , snake_case ) self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , snake_case ) self.assertEqual( [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , snake_case , ) @slow def A_ ( self ): '''simple docstring''' UpperCAmelCase : Optional[Any] = {"input_ids": [[1, 3_9_8_6_7, 3_6, 1_9_3_9_0, 4_8_6, 2_7, 3_5_0_5_2, 8_1_4_3_6, 1_8, 6_0_6_8_5, 1_2_2_5, 7, 3_5_0_5_2, 8_1_4_3_6, 1_8, 9_3_6_7, 1_6_8_9_9, 1_8, 1_5_9_3_7, 5_3, 5_9_4, 7_7_3, 1_8, 1_6_2_8_7, 3_0_4_6_5, 3_6, 1_5_9_3_7, 6, 4_1_1_3_9, 3_8, 3_6_9_7_9, 6_0_7_6_3, 1_9_1, 6, 3_4_1_3_2, 9_9, 6, 5_0_5_3_8, 3_9_0, 4_3_2_3_0, 6, 3_4_1_3_2, 2_7_7_9, 2_0_8_5_0, 1_4, 6_9_9, 1_0_7_2, 1_1_9_4, 3_6, 3_8_2, 1_0_9_0_1, 5_3, 7, 6_9_9, 1_0_7_2, 2_0_8_4, 3_6, 2_0_4_2_2, 6_3_0, 5_3, 1_9, 1_0_5, 3_0_4_9, 1_8_9_6, 1_0_5_3, 1_6_8_9_9, 1_5_0_6, 1_1, 3_7_9_7_8, 4_2_4_3, 7, 1_2_3_7, 3_1_8_6_9, 2_0_0, 1_6_5_6_6, 6_5_4, 6, 3_5_0_5_2, 8_1_4_3_6, 7, 5_5_6_3_0, 1_3_5_9_3, 4, 2], [1, 2_6, 1_5_0_1_1, 1_3, 6_6_7, 8, 1_0_5_3, 1_8, 2_3_6_1_1, 1_2_3_7, 7_2_3_5_6, 1_2_8_2_0, 3_4, 1_0_4_1_3_4, 1_2_0_9, 3_5, 1_3_3_1_3, 6_6_2_7, 2_1, 2_0_2, 3_4_7, 7, 1_6_4, 2_3_9_9, 1_1, 4_6, 4_4_8_5, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1_2_3_2, 2_8_6_4, 1_5_7_8_5, 1_4_9_5_1, 1_0_5, 5, 8_5_8_1, 1_2_5_0, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=snake_case , model_name="microsoft/deberta-v2-xlarge" , revision="ad6e42c1532ddf3a15c39246b63f5559d558b670" , )
311
'''simple docstring''' from jiwer import compute_measures import datasets a : List[Any] = "\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n" a : str = "\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n" a : Union[str, Any] = "\nCompute WER score of transcribed segments against references.\n\nArgs:\n references: List of references for each speech input.\n predictions: List of transcriptions to score.\n concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n (float): the word error rate\n\nExamples:\n\n >>> predictions = [\"this is the prediction\", \"there is an other sample\"]\n >>> references = [\"this is the reference\", \"there is another one\"]\n >>> wer = datasets.load_metric(\"wer\")\n >>> wer_score = wer.compute(predictions=predictions, references=references)\n >>> print(wer_score)\n 0.5\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class UpperCamelCase__ ( datasets.Metric ): """simple docstring""" def A_ ( self ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("string" , id="sequence" ), "references": datasets.Value("string" , id="sequence" ), } ) , codebase_urls=["https://github.com/jitsi/jiwer/"] , reference_urls=[ "https://en.wikipedia.org/wiki/Word_error_rate", ] , ) def A_ ( self , snake_case=None , snake_case=None , snake_case=False ): '''simple docstring''' if concatenate_texts: return compute_measures(snake_case , snake_case )["wer"] else: UpperCAmelCase : Dict = 0 UpperCAmelCase : Optional[Any] = 0 for prediction, reference in zip(snake_case , snake_case ): UpperCAmelCase : Tuple = compute_measures(snake_case , snake_case ) incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"] total += measures["substitutions"] + measures["deletions"] + measures["hits"] return incorrect / total
311
1
import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( HubertConfig, HubertForCTC, HubertModel, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() __A = logging.get_logger(__name__) __A = { '''post_extract_proj''': '''feature_projection.projection''', '''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''', '''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''', '''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''', '''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''', '''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''', '''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''', '''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''', '''fc2''': '''encoder.layers.*.feed_forward.output_dense''', '''final_layer_norm''': '''encoder.layers.*.final_layer_norm''', '''encoder.layer_norm''': '''encoder.layer_norm''', '''w2v_model.layer_norm''': '''feature_projection.layer_norm''', '''w2v_encoder.proj''': '''lm_head''', '''mask_emb''': '''masked_spec_embed''', } def __a ( lowerCAmelCase_ : Optional[int] ,lowerCAmelCase_ : List[Any] ,lowerCAmelCase_ : int ,lowerCAmelCase_ : List[str] ,lowerCAmelCase_ : List[str] ) -> List[str]: '''simple docstring''' for attribute in key.split(""".""" ): UpperCAmelCase_= getattr(lowerCAmelCase_ ,lowerCAmelCase_ ) if weight_type is not None: UpperCAmelCase_= getattr(lowerCAmelCase_ ,lowerCAmelCase_ ).shape else: UpperCAmelCase_= hf_pointer.shape assert hf_shape == value.shape, ( F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be""" F""" {value.shape} for {full_name}""" ) if weight_type == "weight": UpperCAmelCase_= value elif weight_type == "weight_g": UpperCAmelCase_= value elif weight_type == "weight_v": UpperCAmelCase_= value elif weight_type == "bias": UpperCAmelCase_= value else: UpperCAmelCase_= value logger.info(F"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" ) def __a ( lowerCAmelCase_ : List[Any] ,lowerCAmelCase_ : Optional[int] ,lowerCAmelCase_ : Optional[Any] ) -> List[str]: '''simple docstring''' UpperCAmelCase_= [] UpperCAmelCase_= fairseq_model.state_dict() UpperCAmelCase_= hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor for name, value in fairseq_dict.items(): UpperCAmelCase_= False if "conv_layers" in name: load_conv_layer( lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ ,hf_model.config.feat_extract_norm == """group""" ,) UpperCAmelCase_= True else: for key, mapped_key in MAPPING.items(): UpperCAmelCase_= """hubert.""" + mapped_key if (is_finetuned and mapped_key != """lm_head""") else mapped_key if key in name or (key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0] and not is_finetuned): UpperCAmelCase_= True if "*" in mapped_key: UpperCAmelCase_= name.split(lowerCAmelCase_ )[0].split(""".""" )[-2] UpperCAmelCase_= mapped_key.replace("""*""" ,lowerCAmelCase_ ) if "weight_g" in name: UpperCAmelCase_= """weight_g""" elif "weight_v" in name: UpperCAmelCase_= """weight_v""" elif "weight" in name: UpperCAmelCase_= """weight""" elif "bias" in name: UpperCAmelCase_= """bias""" else: UpperCAmelCase_= None set_recursively(lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ ) continue if not is_used: unused_weights.append(lowerCAmelCase_ ) logger.warning(F"""Unused weights: {unused_weights}""" ) def __a ( lowerCAmelCase_ : int ,lowerCAmelCase_ : List[Any] ,lowerCAmelCase_ : Optional[Any] ,lowerCAmelCase_ : Union[str, Any] ,lowerCAmelCase_ : Dict ) -> List[Any]: '''simple docstring''' UpperCAmelCase_= full_name.split("""conv_layers.""" )[-1] UpperCAmelCase_= name.split(""".""" ) UpperCAmelCase_= int(items[0] ) UpperCAmelCase_= int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) UpperCAmelCase_= value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) UpperCAmelCase_= value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was""" " found." ) UpperCAmelCase_= value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" ) UpperCAmelCase_= value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(lowerCAmelCase_ ) @torch.no_grad() def __a ( lowerCAmelCase_ : Tuple ,lowerCAmelCase_ : int ,lowerCAmelCase_ : List[Any]=None ,lowerCAmelCase_ : Tuple=None ,lowerCAmelCase_ : int=True ) -> Tuple: '''simple docstring''' if config_path is not None: UpperCAmelCase_= HubertConfig.from_pretrained(lowerCAmelCase_ ) else: UpperCAmelCase_= HubertConfig() if is_finetuned: if dict_path: UpperCAmelCase_= Dictionary.load(lowerCAmelCase_ ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq UpperCAmelCase_= target_dict.pad_index UpperCAmelCase_= target_dict.bos_index UpperCAmelCase_= target_dict.eos_index UpperCAmelCase_= len(target_dict.symbols ) UpperCAmelCase_= os.path.join(lowerCAmelCase_ ,"""vocab.json""" ) if not os.path.isdir(lowerCAmelCase_ ): logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(lowerCAmelCase_ ) ) return os.makedirs(lowerCAmelCase_ ,exist_ok=lowerCAmelCase_ ) with open(lowerCAmelCase_ ,"""w""" ,encoding="""utf-8""" ) as vocab_handle: json.dump(target_dict.indices ,lowerCAmelCase_ ) UpperCAmelCase_= WavaVecaCTCTokenizer( lowerCAmelCase_ ,unk_token=target_dict.unk_word ,pad_token=target_dict.pad_word ,bos_token=target_dict.bos_word ,eos_token=target_dict.eos_word ,word_delimiter_token="""|""" ,do_lower_case=lowerCAmelCase_ ,) UpperCAmelCase_= True if config.feat_extract_norm == """layer""" else False UpperCAmelCase_= WavaVecaFeatureExtractor( feature_size=1 ,sampling_rate=1_60_00 ,padding_value=0 ,do_normalize=lowerCAmelCase_ ,return_attention_mask=lowerCAmelCase_ ,) UpperCAmelCase_= WavaVecaProcessor(feature_extractor=lowerCAmelCase_ ,tokenizer=lowerCAmelCase_ ) processor.save_pretrained(lowerCAmelCase_ ) UpperCAmelCase_= HubertForCTC(lowerCAmelCase_ ) else: UpperCAmelCase_= HubertModel(lowerCAmelCase_ ) if is_finetuned: UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_= fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] ,arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} ) else: UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_= fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ) UpperCAmelCase_= model[0].eval() recursively_load_weights(lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ ) hf_wavavec.save_pretrained(lowerCAmelCase_ ) if __name__ == "__main__": __A = argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument( '''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not''' ) __A = parser.parse_args() convert_hubert_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
277
import argparse import json import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils.deepspeed import DummyOptim, DummyScheduler __A = 16 __A = 32 def __a ( lowerCAmelCase_ : Accelerator ,lowerCAmelCase_ : int = 16 ,lowerCAmelCase_ : str = "bert-base-cased" ) -> Tuple: '''simple docstring''' UpperCAmelCase_= AutoTokenizer.from_pretrained(lowerCAmelCase_ ) UpperCAmelCase_= load_dataset("""glue""" ,"""mrpc""" ) def tokenize_function(lowerCAmelCase_ : List[Any] ): # max_length=None => use the model max length (it's actually the default) UpperCAmelCase_= tokenizer(examples["""sentence1"""] ,examples["""sentence2"""] ,truncation=lowerCAmelCase_ ,max_length=lowerCAmelCase_ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset UpperCAmelCase_= datasets.map( lowerCAmelCase_ ,batched=lowerCAmelCase_ ,remove_columns=["""idx""", """sentence1""", """sentence2"""] ,load_from_cache_file=lowerCAmelCase_ ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library UpperCAmelCase_= tokenized_datasets.rename_column("""label""" ,"""labels""" ) def collate_fn(lowerCAmelCase_ : List[Any] ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(lowerCAmelCase_ ,padding="""max_length""" ,max_length=1_28 ,return_tensors="""pt""" ) return tokenizer.pad(lowerCAmelCase_ ,padding="""longest""" ,return_tensors="""pt""" ) # Instantiate dataloaders. UpperCAmelCase_= DataLoader( tokenized_datasets["""train"""] ,shuffle=lowerCAmelCase_ ,collate_fn=lowerCAmelCase_ ,batch_size=lowerCAmelCase_ ) UpperCAmelCase_= DataLoader( tokenized_datasets["""validation"""] ,shuffle=lowerCAmelCase_ ,collate_fn=lowerCAmelCase_ ,batch_size=lowerCAmelCase_ ) return train_dataloader, eval_dataloader def __a ( lowerCAmelCase_ : str ,lowerCAmelCase_ : List[str] ) -> int: '''simple docstring''' UpperCAmelCase_= Accelerator() # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs UpperCAmelCase_= config["""lr"""] UpperCAmelCase_= int(config["""num_epochs"""] ) UpperCAmelCase_= int(config["""seed"""] ) UpperCAmelCase_= int(config["""batch_size"""] ) UpperCAmelCase_= args.model_name_or_path set_seed(lowerCAmelCase_ ) UpperCAmelCase_, UpperCAmelCase_= get_dataloaders(lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) UpperCAmelCase_= AutoModelForSequenceClassification.from_pretrained(lowerCAmelCase_ ,return_dict=lowerCAmelCase_ ) # Instantiate optimizer UpperCAmelCase_= ( AdamW if accelerator.state.deepspeed_plugin is None or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) UpperCAmelCase_= optimizer_cls(params=model.parameters() ,lr=lowerCAmelCase_ ) if accelerator.state.deepspeed_plugin is not None: UpperCAmelCase_= accelerator.state.deepspeed_plugin.deepspeed_config[ """gradient_accumulation_steps""" ] else: UpperCAmelCase_= 1 UpperCAmelCase_= (len(lowerCAmelCase_ ) * num_epochs) // gradient_accumulation_steps # Instantiate scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): UpperCAmelCase_= get_linear_schedule_with_warmup( optimizer=lowerCAmelCase_ ,num_warmup_steps=0 ,num_training_steps=lowerCAmelCase_ ,) else: UpperCAmelCase_= DummyScheduler(lowerCAmelCase_ ,total_num_steps=lowerCAmelCase_ ,warmup_num_steps=0 ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_= accelerator.prepare( lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ ) # We need to keep track of how many total steps we have iterated over UpperCAmelCase_= 0 # We also need to keep track of the stating epoch so files are named properly UpperCAmelCase_= 0 # Now we train the model UpperCAmelCase_= evaluate.load("""glue""" ,"""mrpc""" ) UpperCAmelCase_= 0 UpperCAmelCase_= {} for epoch in range(lowerCAmelCase_ ,lowerCAmelCase_ ): model.train() for step, batch in enumerate(lowerCAmelCase_ ): UpperCAmelCase_= model(**lowerCAmelCase_ ) UpperCAmelCase_= outputs.loss UpperCAmelCase_= loss / gradient_accumulation_steps accelerator.backward(lowerCAmelCase_ ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 model.eval() UpperCAmelCase_= 0 for step, batch in enumerate(lowerCAmelCase_ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): UpperCAmelCase_= model(**lowerCAmelCase_ ) UpperCAmelCase_= outputs.logits.argmax(dim=-1 ) # It is slightly faster to call this once, than multiple times UpperCAmelCase_, UpperCAmelCase_= accelerator.gather( (predictions, batch["""labels"""]) ) # If we are in a multiprocess environment, the last batch has duplicates if accelerator.use_distributed: if step == len(lowerCAmelCase_ ) - 1: UpperCAmelCase_= predictions[: len(eval_dataloader.dataset ) - samples_seen] UpperCAmelCase_= references[: len(eval_dataloader.dataset ) - samples_seen] else: samples_seen += references.shape[0] metric.add_batch( predictions=lowerCAmelCase_ ,references=lowerCAmelCase_ ,) UpperCAmelCase_= metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F"""epoch {epoch}:""" ,lowerCAmelCase_ ) UpperCAmelCase_= eval_metric["""accuracy"""] if best_performance < eval_metric["accuracy"]: UpperCAmelCase_= eval_metric["""accuracy"""] if args.performance_lower_bound is not None: assert ( args.performance_lower_bound <= best_performance ), F"""Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}""" accelerator.wait_for_everyone() if accelerator.is_main_process: with open(os.path.join(args.output_dir ,"""all_results.json""" ) ,"""w""" ) as f: json.dump(lowerCAmelCase_ ,lowerCAmelCase_ ) def __a ( ) -> Optional[Any]: '''simple docstring''' UpperCAmelCase_= argparse.ArgumentParser(description="""Simple example of training script tracking peak GPU memory usage.""" ) parser.add_argument( """--model_name_or_path""" ,type=lowerCAmelCase_ ,default="""bert-base-cased""" ,help="""Path to pretrained model or model identifier from huggingface.co/models.""" ,required=lowerCAmelCase_ ,) parser.add_argument( """--output_dir""" ,type=lowerCAmelCase_ ,default=""".""" ,help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""" ,) parser.add_argument( """--performance_lower_bound""" ,type=lowerCAmelCase_ ,default=lowerCAmelCase_ ,help="""Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.""" ,) parser.add_argument( """--num_epochs""" ,type=lowerCAmelCase_ ,default=3 ,help="""Number of train epochs.""" ,) UpperCAmelCase_= parser.parse_args() UpperCAmelCase_= {"""lr""": 2E-5, """num_epochs""": args.num_epochs, """seed""": 42, """batch_size""": 16} training_function(lowerCAmelCase_ ,lowerCAmelCase_ ) if __name__ == "__main__": main()
277
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available A__: List[str] = {'''configuration_speech_encoder_decoder''': ['''SpeechEncoderDecoderConfig''']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__: List[Any] = ['''SpeechEncoderDecoderModel'''] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__: Optional[int] = ['''FlaxSpeechEncoderDecoderModel'''] if TYPE_CHECKING: from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel else: import sys A__: Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
276
'''simple docstring''' from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, apply_forward_hook from .modeling_utils import ModelMixin from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer @dataclass class A__ ( UpperCAmelCase__ ): __UpperCamelCase : torch.FloatTensor class A__ ( UpperCAmelCase__ , UpperCAmelCase__ ): @register_to_config def __init__( self :Optional[Any] , SCREAMING_SNAKE_CASE :int = 3 , SCREAMING_SNAKE_CASE :int = 3 , SCREAMING_SNAKE_CASE :Tuple[str] = ("DownEncoderBlock2D",) , SCREAMING_SNAKE_CASE :Tuple[str] = ("UpDecoderBlock2D",) , SCREAMING_SNAKE_CASE :Tuple[int] = (6_4,) , SCREAMING_SNAKE_CASE :int = 1 , SCREAMING_SNAKE_CASE :str = "silu" , SCREAMING_SNAKE_CASE :int = 3 , SCREAMING_SNAKE_CASE :int = 3_2 , SCREAMING_SNAKE_CASE :int = 2_5_6 , SCREAMING_SNAKE_CASE :int = 3_2 , SCREAMING_SNAKE_CASE :Optional[int] = None , SCREAMING_SNAKE_CASE :float = 0.18_215 , SCREAMING_SNAKE_CASE :str = "group" , ) -> Optional[int]: '''simple docstring''' super().__init__() # pass init params to Encoder _a : Union[str, Any] =Encoder( in_channels=SCREAMING_SNAKE_CASE , out_channels=SCREAMING_SNAKE_CASE , down_block_types=SCREAMING_SNAKE_CASE , block_out_channels=SCREAMING_SNAKE_CASE , layers_per_block=SCREAMING_SNAKE_CASE , act_fn=SCREAMING_SNAKE_CASE , norm_num_groups=SCREAMING_SNAKE_CASE , double_z=SCREAMING_SNAKE_CASE , ) _a : Optional[int] =vq_embed_dim if vq_embed_dim is not None else latent_channels _a : Optional[int] =nn.Convad(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , 1 ) _a : str =VectorQuantizer(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , beta=0.25 , remap=SCREAMING_SNAKE_CASE , sane_index_shape=SCREAMING_SNAKE_CASE ) _a : List[str] =nn.Convad(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , 1 ) # pass init params to Decoder _a : List[str] =Decoder( in_channels=SCREAMING_SNAKE_CASE , out_channels=SCREAMING_SNAKE_CASE , up_block_types=SCREAMING_SNAKE_CASE , block_out_channels=SCREAMING_SNAKE_CASE , layers_per_block=SCREAMING_SNAKE_CASE , act_fn=SCREAMING_SNAKE_CASE , norm_num_groups=SCREAMING_SNAKE_CASE , norm_type=SCREAMING_SNAKE_CASE , ) @apply_forward_hook def __UpperCAmelCase ( self :Optional[Any] , SCREAMING_SNAKE_CASE :torch.FloatTensor , SCREAMING_SNAKE_CASE :bool = True ) -> VQEncoderOutput: '''simple docstring''' _a : Optional[int] =self.encoder(SCREAMING_SNAKE_CASE ) _a : int =self.quant_conv(SCREAMING_SNAKE_CASE ) if not return_dict: return (h,) return VQEncoderOutput(latents=SCREAMING_SNAKE_CASE ) @apply_forward_hook def __UpperCAmelCase ( self :List[Any] , SCREAMING_SNAKE_CASE :torch.FloatTensor , SCREAMING_SNAKE_CASE :bool = False , SCREAMING_SNAKE_CASE :bool = True ) -> Union[DecoderOutput, torch.FloatTensor]: '''simple docstring''' # also go through quantization layer if not force_not_quantize: _a , _a , _a : Tuple =self.quantize(SCREAMING_SNAKE_CASE ) else: _a : str =h _a : Dict =self.post_quant_conv(SCREAMING_SNAKE_CASE ) _a : Union[str, Any] =self.decoder(SCREAMING_SNAKE_CASE , quant if self.config.norm_type == """spatial""" else None ) if not return_dict: return (dec,) return DecoderOutput(sample=SCREAMING_SNAKE_CASE ) def __UpperCAmelCase ( self :Optional[Any] , SCREAMING_SNAKE_CASE :torch.FloatTensor , SCREAMING_SNAKE_CASE :bool = True ) -> Union[DecoderOutput, torch.FloatTensor]: '''simple docstring''' _a : Tuple =sample _a : int =self.encode(SCREAMING_SNAKE_CASE ).latents _a : List[Any] =self.decode(SCREAMING_SNAKE_CASE ).sample if not return_dict: return (dec,) return DecoderOutput(sample=SCREAMING_SNAKE_CASE )
276
1
'''simple docstring''' import warnings from diffusers import StableDiffusionImgaImgPipeline # noqa F401 warnings.warn( "The `image_to_image.py` script is outdated. Please use directly `from diffusers import" " StableDiffusionImg2ImgPipeline` instead." )
92
'''simple docstring''' import argparse from collections import defaultdict import yaml _SCREAMING_SNAKE_CASE : Optional[Any] = "docs/source/en/_toctree.yml" def UpperCamelCase_( snake_case : Optional[Any] ): '''simple docstring''' snake_case_ = defaultdict(snake_case ) for doc in model_doc: counts[doc["local"]] += 1 snake_case_ = [key for key, value in counts.items() if value > 1] snake_case_ = [] for duplicate_key in duplicates: snake_case_ = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key} ) if len(snake_case ) > 1: raise ValueError( f'{duplicate_key} is present several times in the documentation table of content at ' "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the " "others." ) # Only add this once new_doc.append({"local": duplicate_key, "title": titles[0]} ) # Add none duplicate-keys new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1] ) # Sort return sorted(snake_case , key=lambda snake_case : s["title"].lower() ) def UpperCamelCase_( snake_case : Optional[int]=False ): '''simple docstring''' with open(snake_case , encoding="utf-8" ) as f: snake_case_ = yaml.safe_load(f.read() ) # Get to the API doc snake_case_ = 0 while content[api_idx]["title"] != "API": api_idx += 1 snake_case_ = content[api_idx]["sections"] # Then to the model doc snake_case_ = 0 while api_doc[model_idx]["title"] != "Models": model_idx += 1 snake_case_ = api_doc[model_idx]["sections"] snake_case_ = [(idx, section) for idx, section in enumerate(snake_case ) if "sections" in section] snake_case_ = False for idx, modality_doc in modalities_docs: snake_case_ = modality_doc["sections"] snake_case_ = clean_model_doc_toc(snake_case ) if old_modality_doc != new_modality_doc: snake_case_ = True if overwrite: snake_case_ = new_modality_doc if diff: if overwrite: snake_case_ = model_doc snake_case_ = api_doc with open(snake_case , "w" , encoding="utf-8" ) as f: f.write(yaml.dump(snake_case , allow_unicode=snake_case ) ) else: raise ValueError( "The model doc part of the table of content is not properly sorted, run `make style` to fix this." ) if __name__ == "__main__": _SCREAMING_SNAKE_CASE : List[str] = argparse.ArgumentParser() parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.") _SCREAMING_SNAKE_CASE : Any = parser.parse_args() check_model_doc(args.fix_and_overwrite)
92
1
"""simple docstring""" import argparse import re from flax.traverse_util import flatten_dict, unflatten_dict from tax import checkpoints from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model from transformers.utils import logging logging.set_verbosity_info() # should not include what is already done by the `from_pt` argument __a = { "/attention/": "/0/SelfAttention/", "/self_attention/": "/0/SelfAttention/", "/encoder_decoder_attention/": "/1/EncDecAttention/", "value": "v", "query": "q", "key": "k", "out": "o", "pre_self_attention_layer_norm": "0/layer_norm", "pre_cross_attention_layer_norm": "1/layer_norm", "pre_attention_layer_norm": "0/layer_norm", # previously 1, but seems wrong "token_embedder": "shared", "encoder_norm": "final_layer_norm", "decoder_norm": "final_layer_norm", "relpos_bias/rel_embedding": "block/0/layer/0/SelfAttention/relative_attention_bias/weight", "router/router_weights/w/": "router/classifier/", "roer/roer_weights/w/": "router/classifier/", "logits_dense": "lm_head", } def A_ ( _lowercase ): '''simple docstring''' snake_case_ :Optional[Any] = list(s_dict.keys() ) for key in keys: snake_case_ :Dict = r""".*/layers_(\d+)""" snake_case_ :Dict = key if re.match(_lowercase, _lowercase ): snake_case_ :Dict = re.sub(r"""layers_(\d+)""", r"""block/\1/layer""", _lowercase ) snake_case_ :List[Any] = r"""(encoder|decoder)\/""" if re.match(_lowercase, _lowercase ): snake_case_ :Any = re.match(_lowercase, _lowercase ).groups() if groups[0] == "encoder": snake_case_ :str = re.sub(r"""/mlp/""", r"""/1/mlp/""", _lowercase ) snake_case_ :Optional[Any] = re.sub(r"""/pre_mlp_layer_norm/""", r"""/1/layer_norm/""", _lowercase ) elif groups[0] == "decoder": snake_case_ :int = re.sub(r"""/mlp/""", r"""/2/mlp/""", _lowercase ) snake_case_ :Optional[Any] = re.sub(r"""/pre_mlp_layer_norm/""", r"""/2/layer_norm/""", _lowercase ) # 2. Convert other classic mappings for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items(): if old_key in new_key: snake_case_ :Union[str, Any] = new_key.replace(_lowercase, _lowercase ) print(f"""{key} -> {new_key}""" ) snake_case_ :List[Any] = s_dict.pop(_lowercase ) if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict: snake_case_ :List[Any] = s_dict[ """encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight""" ].T if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict: snake_case_ :Tuple = s_dict[ """decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight""" ].T # 3. Take extra care of the EXPERTS layer for key in list(s_dict.keys() ): if "expert" in key: snake_case_ :Tuple = s_dict[key].shape[0] snake_case_ :int = s_dict[key] for idx in range(_lowercase ): snake_case_ :Union[str, Any] = expert_weihts[idx] print(f"""{key} -> {key.replace('expert/', 'nested fstring' )}""" ) s_dict.pop(_lowercase ) return s_dict __a = { "NUM_ENCODER_LAYERS": "num_layers", "NUM_DECODER_LAYERS": "num_decoder_layers", "NUM_HEADS": "num_heads", "HEAD_DIM": "d_kv", "EMBED_DIM": "d_model", "MLP_DIM": "d_ff", "NUM_SELECTED_EXPERTS": "num_selected_experts", "NUM_ENCODER_SPARSE_LAYERS": "num_sparse_encoder_layers", "NUM_DECODER_SPARSE_LAYERS": "num_sparse_decoder_layers", "dense.MlpBlock.activations": "feed_forward_proj", } def A_ ( _lowercase, _lowercase ): '''simple docstring''' import regex as re with open(_lowercase, """r""" ) as f: snake_case_ :Tuple = f.read() snake_case_ :Dict = re.findall(r"""(.*) = ([0-9.]*)""", _lowercase ) snake_case_ :Union[str, Any] = {} for param, value in regex_match: if param in GIN_TO_CONFIG_MAPPING and value != "": snake_case_ :int = float(_lowercase ) if """.""" in value else int(_lowercase ) snake_case_ :Dict = re.findall(r"""(.*activations) = \(\'(.*)\',\)""", _lowercase )[0] snake_case_ :Optional[int] = str(activation[1] ) snake_case_ :Dict = num_experts snake_case_ :int = SwitchTransformersConfig(**_lowercase ) return config def A_ ( _lowercase, _lowercase, _lowercase=None, _lowercase="./", _lowercase=8 ): '''simple docstring''' print(f"""Loading flax weights from : {flax_checkpoint_path}""" ) snake_case_ :List[Any] = checkpoints.load_tax_checkpoint(_lowercase ) if gin_file is not None: snake_case_ :Dict = convert_gin_to_config(_lowercase, _lowercase ) else: snake_case_ :Optional[Any] = SwitchTransformersConfig.from_pretrained(_lowercase ) snake_case_ :Union[str, Any] = SwitchTransformersForConditionalGeneration(_lowercase ) snake_case_ :Tuple = flax_params["""target"""] snake_case_ :Any = flatten_dict(_lowercase, sep="""/""" ) snake_case_ :int = rename_keys(_lowercase ) snake_case_ :List[Any] = unflatten_dict(_lowercase, sep="""/""" ) # Load the flax params in the PT model load_flax_weights_in_pytorch_model(_lowercase, _lowercase ) print(f"""Save PyTorch model to {pytorch_dump_path}""" ) pt_model.save_pretrained(_lowercase ) if __name__ == "__main__": __a = argparse.ArgumentParser() # Required parameters parser.add_argument( "--switch_t5x_checkpoint_path", default=None, type=str, required=True, help=( "The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the" " model architecture. If not provided, a `gin_file` has to be provided." ), ) parser.add_argument( "--gin_file", default=None, type=str, required=False, help="Path to the gin config file. If not provided, a `config_file` has to be passed ", ) parser.add_argument( "--config_name", default=None, type=str, required=False, help="Config name of SwitchTransformers model." ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output pytorch model." ) parser.add_argument("--num_experts", default=8, type=int, required=False, help="Number of experts") __a = parser.parse_args() convert_flax_checkpoint_to_pytorch( args.switch_tax_checkpoint_path, args.config_name, args.gin_file, args.pytorch_dump_folder_path, args.num_experts, )
66
"""simple docstring""" from typing import TYPE_CHECKING from ..utils import _LazyModule _A = { """config""": [ """EXTERNAL_DATA_FORMAT_SIZE_LIMIT""", """OnnxConfig""", """OnnxConfigWithPast""", """OnnxSeq2SeqConfigWithPast""", """PatchingSpec""", ], """convert""": ["""export""", """validate_model_outputs"""], """features""": ["""FeaturesManager"""], """utils""": ["""ParameterFormat""", """compute_serialized_parameters_size"""], } if TYPE_CHECKING: from .config import ( EXTERNAL_DATA_FORMAT_SIZE_LIMIT, OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast, PatchingSpec, ) from .convert import export, validate_model_outputs from .features import FeaturesManager from .utils import ParameterFormat, compute_serialized_parameters_size else: import sys _A = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
171
0
# This script creates a super tiny model that is useful inside tests, when we just want to test that # the machinery works, without needing to the check the quality of the outcomes. # # This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny - # all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and # emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files. # The latter is done by `fsmt-make-super-tiny-model.py`. # # It will be used then as "stas/tiny-wmt19-en-ru" from pathlib import Path import json import tempfile from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES UpperCAmelCase = 'tiny-wmt19-en-ru' # Build # borrowed from a test UpperCAmelCase = [ 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'w</w>', 'r</w>', 't</w>', 'lo', 'low', 'er</w>', 'low</w>', 'lowest</w>', 'newer</w>', 'wider</w>', '<unk>', ] UpperCAmelCase = dict(zip(vocab, range(len(vocab)))) UpperCAmelCase = ['l o 123', 'lo w 1456', 'e r</w> 1789', ''] with tempfile.TemporaryDirectory() as tmpdirname: UpperCAmelCase = Path(tmpdirname) UpperCAmelCase = build_dir / VOCAB_FILES_NAMES['src_vocab_file'] UpperCAmelCase = build_dir / VOCAB_FILES_NAMES['tgt_vocab_file'] UpperCAmelCase = build_dir / VOCAB_FILES_NAMES['merges_file'] with open(src_vocab_file, """w""") as fp: fp.write(json.dumps(vocab_tokens)) with open(tgt_vocab_file, """w""") as fp: fp.write(json.dumps(vocab_tokens)) with open(merges_file, """w""") as fp: fp.write("""\n""".join(merges)) UpperCAmelCase = FSMTTokenizer( langs=["""en""", """ru"""], src_vocab_size=len(vocab), tgt_vocab_size=len(vocab), src_vocab_file=src_vocab_file, tgt_vocab_file=tgt_vocab_file, merges_file=merges_file, ) UpperCAmelCase = FSMTConfig( langs=["""ru""", """en"""], src_vocab_size=1000, tgt_vocab_size=1000, d_model=4, encoder_layers=1, decoder_layers=1, encoder_ffn_dim=4, decoder_ffn_dim=4, encoder_attention_heads=1, decoder_attention_heads=1, ) UpperCAmelCase = FSMTForConditionalGeneration(config) print(f'''num of params {tiny_model.num_parameters()}''') # Test UpperCAmelCase = tokenizer(["""Making tiny model"""], return_tensors="""pt""") UpperCAmelCase = tiny_model(**batch) print("""test output:""", len(outputs.logits[0])) # Save tiny_model.half() # makes it smaller tiny_model.save_pretrained(mname_tiny) tokenizer.save_pretrained(mname_tiny) print(f'''Generated {mname_tiny}''') # Upload # transformers-cli upload tiny-wmt19-en-ru
355
import unittest from transformers.utils.backbone_utils import ( BackboneMixin, get_aligned_output_features_output_indices, verify_out_features_out_indices, ) class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' def UpperCamelCase__ ( self ): snake_case_ = ['''a''', '''b''', '''c'''] # Defaults to last layer if both are None snake_case_ , snake_case_ = get_aligned_output_features_output_indices(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) self.assertEqual(_UpperCAmelCase , ['''c'''] ) self.assertEqual(_UpperCAmelCase , [2] ) # Out indices set to match out features snake_case_ , snake_case_ = get_aligned_output_features_output_indices(['''a''', '''c'''] , _UpperCAmelCase , _UpperCAmelCase ) self.assertEqual(_UpperCAmelCase , ['''a''', '''c'''] ) self.assertEqual(_UpperCAmelCase , [0, 2] ) # Out features set to match out indices snake_case_ , snake_case_ = get_aligned_output_features_output_indices(_UpperCAmelCase , [0, 2] , _UpperCAmelCase ) self.assertEqual(_UpperCAmelCase , ['''a''', '''c'''] ) self.assertEqual(_UpperCAmelCase , [0, 2] ) # Out features selected from negative indices snake_case_ , snake_case_ = get_aligned_output_features_output_indices(_UpperCAmelCase , [-3, -1] , _UpperCAmelCase ) self.assertEqual(_UpperCAmelCase , ['''a''', '''c'''] ) self.assertEqual(_UpperCAmelCase , [-3, -1] ) def UpperCamelCase__ ( self ): # Stage names must be set with self.assertRaises(_UpperCAmelCase ): verify_out_features_out_indices(['''a''', '''b'''] , (0, 1) , _UpperCAmelCase ) # Out features must be a list with self.assertRaises(_UpperCAmelCase ): verify_out_features_out_indices(('''a''', '''b''') , (0, 1) , ['''a''', '''b'''] ) # Out features must be a subset of stage names with self.assertRaises(_UpperCAmelCase ): verify_out_features_out_indices(['''a''', '''b'''] , (0, 1) , ['''a'''] ) # Out indices must be a list or tuple with self.assertRaises(_UpperCAmelCase ): verify_out_features_out_indices(_UpperCAmelCase , 0 , ['''a''', '''b'''] ) # Out indices must be a subset of stage names with self.assertRaises(_UpperCAmelCase ): verify_out_features_out_indices(_UpperCAmelCase , (0, 1) , ['''a'''] ) # Out features and out indices must be the same length with self.assertRaises(_UpperCAmelCase ): verify_out_features_out_indices(['''a''', '''b'''] , (0,) , ['''a''', '''b''', '''c'''] ) # Out features should match out indices with self.assertRaises(_UpperCAmelCase ): verify_out_features_out_indices(['''a''', '''b'''] , (0, 2) , ['''a''', '''b''', '''c'''] ) # Out features and out indices should be in order with self.assertRaises(_UpperCAmelCase ): verify_out_features_out_indices(['''b''', '''a'''] , (0, 1) , ['''a''', '''b'''] ) # Check passes with valid inputs verify_out_features_out_indices(['''a''', '''b''', '''d'''] , (0, 1, -1) , ['''a''', '''b''', '''c''', '''d'''] ) def UpperCamelCase__ ( self ): snake_case_ = BackboneMixin() snake_case_ = ['''a''', '''b''', '''c'''] snake_case_ = ['''a''', '''c'''] snake_case_ = [0, 2] # Check that the output features and indices are set correctly self.assertEqual(backbone.out_features , ['''a''', '''c'''] ) self.assertEqual(backbone.out_indices , [0, 2] ) # Check out features and indices are updated correctly snake_case_ = ['''a''', '''b'''] self.assertEqual(backbone.out_features , ['''a''', '''b'''] ) self.assertEqual(backbone.out_indices , [0, 1] ) snake_case_ = [-3, -1] self.assertEqual(backbone.out_features , ['''a''', '''c'''] ) self.assertEqual(backbone.out_indices , [-3, -1] )
267
0
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ..models.auto import AutoModelForVisionaSeq from ..utils import requires_backends from .base import PipelineTool if TYPE_CHECKING: from PIL import Image class lowercase( __a ): '''simple docstring''' lowercase__ = "Salesforce/blip-image-captioning-base" lowercase__ = ( "This is a tool that generates a description of an image. It takes an input named `image` which should be the " "image to caption, and returns a text that contains the description in English." ) lowercase__ = "image_captioner" lowercase__ = AutoModelForVisionaSeq lowercase__ = ["image"] lowercase__ = ["text"] def __init__( self: Optional[Any], *a_: List[str], **a_: List[Any] ): '''simple docstring''' requires_backends(self, ["""vision"""] ) super().__init__(*a_, **a_ ) def UpperCamelCase_ ( self: str, a_: "Image" ): '''simple docstring''' return self.pre_processor(images=a_, return_tensors="""pt""" ) def UpperCamelCase_ ( self: Dict, a_: Dict ): '''simple docstring''' return self.model.generate(**a_ ) def UpperCamelCase_ ( self: List[Any], a_: Tuple ): '''simple docstring''' return self.pre_processor.batch_decode(a_, skip_special_tokens=a_ )[0].strip()
64
"""simple docstring""" import argparse import os # New Code # import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils import find_executable_batch_size ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to ensure out-of-memory errors never # interrupt training, and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## A_ = 16 A_ = 32 def UpperCAmelCase__ (snake_case__ : Accelerator , snake_case__ : int = 16 ): """simple docstring""" _snake_case : Optional[Any] = AutoTokenizer.from_pretrained("""bert-base-cased""" ) _snake_case : Any = load_dataset("""glue""" , """mrpc""" ) def tokenize_function(snake_case__ : Any ): # max_length=None => use the model max length (it's actually the default) _snake_case : Any = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=snake_case__ , max_length=snake_case__ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): _snake_case : List[Any] = datasets.map( snake_case__ , batched=snake_case__ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library _snake_case : int = tokenized_datasets.rename_column("""label""" , """labels""" ) def collate_fn(snake_case__ : int ): # On TPU it's best to pad everything to the same length or training will be very slow. _snake_case : Optional[int] = 1_28 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": _snake_case : str = 16 elif accelerator.mixed_precision != "no": _snake_case : Optional[int] = 8 else: _snake_case : Optional[int] = None return tokenizer.pad( snake_case__ , padding="""longest""" , max_length=snake_case__ , pad_to_multiple_of=snake_case__ , return_tensors="""pt""" , ) # Instantiate dataloaders. _snake_case : Optional[int] = DataLoader( tokenized_datasets["""train"""] , shuffle=snake_case__ , collate_fn=snake_case__ , batch_size=snake_case__ ) _snake_case : Dict = DataLoader( tokenized_datasets["""validation"""] , shuffle=snake_case__ , collate_fn=snake_case__ , batch_size=snake_case__ ) return train_dataloader, eval_dataloader # For testing only if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1": from accelerate.test_utils.training import mocked_dataloaders A_ = mocked_dataloaders # noqa: F811 def UpperCAmelCase__ (snake_case__ : List[Any] , snake_case__ : Any ): """simple docstring""" if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , snake_case__ ) == "1": _snake_case : List[Any] = 2 # Initialize accelerator _snake_case : str = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs _snake_case : Tuple = config["""lr"""] _snake_case : str = int(config["""num_epochs"""] ) _snake_case : Union[str, Any] = int(config["""seed"""] ) _snake_case : Union[str, Any] = int(config["""batch_size"""] ) _snake_case : List[str] = evaluate.load("""glue""" , """mrpc""" ) # New Code # # We now can define an inner training loop function. It should take a batch size as the only parameter, # and build the dataloaders in there. # It also gets our decorator @find_executable_batch_size(starting_batch_size=snake_case__ ) def inner_training_loop(snake_case__ : Union[str, Any] ): # And now just move everything below under this function # We need to bring in the Accelerator object from earlier nonlocal accelerator # And reset all of its attributes that could hold onto any memory: accelerator.free_memory() # Then we can declare the model, optimizer, and everything else: set_seed(snake_case__ ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) _snake_case : List[Any] = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=snake_case__ ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). _snake_case : Tuple = model.to(accelerator.device ) # Instantiate optimizer _snake_case : str = AdamW(params=model.parameters() , lr=snake_case__ ) _snake_case , _snake_case : Optional[int] = get_dataloaders(snake_case__ , snake_case__ ) # Instantiate scheduler _snake_case : str = get_linear_schedule_with_warmup( optimizer=snake_case__ , num_warmup_steps=1_00 , num_training_steps=(len(snake_case__ ) * num_epochs) , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. _snake_case , _snake_case , _snake_case , _snake_case , _snake_case : List[str] = accelerator.prepare( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) # Now we train the model for epoch in range(snake_case__ ): model.train() for step, batch in enumerate(snake_case__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) _snake_case : int = model(**snake_case__ ) _snake_case : str = outputs.loss accelerator.backward(snake_case__ ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(snake_case__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): _snake_case : int = model(**snake_case__ ) _snake_case : Optional[Any] = outputs.logits.argmax(dim=-1 ) _snake_case , _snake_case : Tuple = accelerator.gather_for_metrics((predictions, batch["""labels"""]) ) metric.add_batch( predictions=snake_case__ , references=snake_case__ , ) _snake_case : str = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F"epoch {epoch}:" , snake_case__ ) # New Code # # And call it at the end with no arguments # Note: You could also refactor this outside of your training loop function inner_training_loop() def UpperCAmelCase__ (): """simple docstring""" _snake_case : Any = argparse.ArgumentParser(description="""Simple example of training script.""" ) parser.add_argument( """--mixed_precision""" , type=snake_case__ , default=snake_case__ , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose""" """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.""" """and an Nvidia Ampere GPU.""" , ) parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" ) _snake_case : Dict = parser.parse_args() _snake_case : int = {"""lr""": 2e-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16} training_function(snake_case__ , snake_case__ ) if __name__ == "__main__": main()
64
1
import os import re import sys import traceback import warnings from pathlib import Path from typing import Dict, Optional, Union from uuid import uuida from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami from huggingface_hub.file_download import REGEX_COMMIT_HASH from huggingface_hub.utils import ( EntryNotFoundError, RepositoryNotFoundError, RevisionNotFoundError, is_jinja_available, ) from packaging import version from requests import HTTPError from .. import __version__ from .constants import ( DEPRECATED_REVISION_ARGS, DIFFUSERS_CACHE, HUGGINGFACE_CO_RESOLVE_ENDPOINT, SAFETENSORS_WEIGHTS_NAME, WEIGHTS_NAME, ) from .import_utils import ( ENV_VARS_TRUE_VALUES, _flax_version, _jax_version, _onnxruntime_version, _torch_version, is_flax_available, is_onnx_available, is_torch_available, ) from .logging import get_logger a_ : Optional[Any] = get_logger(__name__) a_ : Optional[Any] = Path(__file__).parent / 'model_card_template.md' a_ : str = uuida().hex a_ : Optional[int] = os.getenv('HF_HUB_OFFLINE', '').upper() in ENV_VARS_TRUE_VALUES a_ : Optional[Any] = os.getenv('DISABLE_TELEMETRY', '').upper() in ENV_VARS_TRUE_VALUES a_ : List[Any] = HUGGINGFACE_CO_RESOLVE_ENDPOINT + '/api/telemetry/' def lowerCamelCase__ (_UpperCAmelCase = None): SCREAMING_SNAKE_CASE = F'''diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}''' if DISABLE_TELEMETRY or HF_HUB_OFFLINE: return ua + "; telemetry/off" if is_torch_available(): ua += F'''; torch/{_torch_version}''' if is_flax_available(): ua += F'''; jax/{_jax_version}''' ua += F'''; flax/{_flax_version}''' if is_onnx_available(): ua += F'''; onnxruntime/{_onnxruntime_version}''' # CI will set this value to True if os.environ.get('DIFFUSERS_IS_CI' , '').upper() in ENV_VARS_TRUE_VALUES: ua += "; is_ci/true" if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE): ua += "; " + "; ".join(F'''{k}/{v}''' for k, v in user_agent.items()) elif isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE): ua += "; " + user_agent return ua def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None): if token is None: SCREAMING_SNAKE_CASE = HfFolder.get_token() if organization is None: SCREAMING_SNAKE_CASE = whoami(__SCREAMING_SNAKE_CASE)["""name"""] return F'''{username}/{model_id}''' else: return F'''{organization}/{model_id}''' def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase): if not is_jinja_available(): raise ValueError( 'Modelcard rendering is based on Jinja templates.' ' Please make sure to have `jinja` installed before using `create_model_card`.' ' To install it, please run `pip install Jinja2`.') if hasattr(__SCREAMING_SNAKE_CASE , 'local_rank') and args.local_rank not in [-1, 0]: return SCREAMING_SNAKE_CASE = args.hub_token if hasattr(__SCREAMING_SNAKE_CASE , 'hub_token') else None SCREAMING_SNAKE_CASE = get_full_repo_name(__SCREAMING_SNAKE_CASE , token=__SCREAMING_SNAKE_CASE) SCREAMING_SNAKE_CASE = ModelCard.from_template( card_data=ModelCardData( # Card metadata object that will be converted to YAML block language='en' , license='apache-2.0' , library_name='diffusers' , tags=[] , datasets=args.dataset_name , metrics=[] , ) , template_path=__SCREAMING_SNAKE_CASE , model_name=__SCREAMING_SNAKE_CASE , repo_name=__SCREAMING_SNAKE_CASE , dataset_name=args.dataset_name if hasattr(__SCREAMING_SNAKE_CASE , 'dataset_name') else None , learning_rate=args.learning_rate , train_batch_size=args.train_batch_size , eval_batch_size=args.eval_batch_size , gradient_accumulation_steps=( args.gradient_accumulation_steps if hasattr(__SCREAMING_SNAKE_CASE , 'gradient_accumulation_steps') else None ) , adam_betaa=args.adam_betaa if hasattr(__SCREAMING_SNAKE_CASE , 'adam_beta1') else None , adam_betaa=args.adam_betaa if hasattr(__SCREAMING_SNAKE_CASE , 'adam_beta2') else None , adam_weight_decay=args.adam_weight_decay if hasattr(__SCREAMING_SNAKE_CASE , 'adam_weight_decay') else None , adam_epsilon=args.adam_epsilon if hasattr(__SCREAMING_SNAKE_CASE , 'adam_epsilon') else None , lr_scheduler=args.lr_scheduler if hasattr(__SCREAMING_SNAKE_CASE , 'lr_scheduler') else None , lr_warmup_steps=args.lr_warmup_steps if hasattr(__SCREAMING_SNAKE_CASE , 'lr_warmup_steps') else None , ema_inv_gamma=args.ema_inv_gamma if hasattr(__SCREAMING_SNAKE_CASE , 'ema_inv_gamma') else None , ema_power=args.ema_power if hasattr(__SCREAMING_SNAKE_CASE , 'ema_power') else None , ema_max_decay=args.ema_max_decay if hasattr(__SCREAMING_SNAKE_CASE , 'ema_max_decay') else None , mixed_precision=args.mixed_precision , ) SCREAMING_SNAKE_CASE = os.path.join(args.output_dir , 'README.md') model_card.save(__SCREAMING_SNAKE_CASE) def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase = None): if resolved_file is None or commit_hash is not None: return commit_hash SCREAMING_SNAKE_CASE = str(Path(__SCREAMING_SNAKE_CASE).as_posix()) SCREAMING_SNAKE_CASE = re.search(R'snapshots/([^/]+)/' , __SCREAMING_SNAKE_CASE) if search is None: return None SCREAMING_SNAKE_CASE = search.groups()[0] return commit_hash if REGEX_COMMIT_HASH.match(__SCREAMING_SNAKE_CASE) else None # Old default cache path, potentially to be migrated. # This logic was more or less taken from `transformers`, with the following differences: # - Diffusers doesn't use custom environment variables to specify the cache path. # - There is no need to migrate the cache format, just move the files to the new location. a_ : Tuple = os.path.expanduser( os.getenv('HF_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'huggingface')) ) a_ : Optional[Any] = os.path.join(hf_cache_home, 'diffusers') def lowerCamelCase__ (_UpperCAmelCase = None , _UpperCAmelCase = None): if new_cache_dir is None: SCREAMING_SNAKE_CASE = DIFFUSERS_CACHE if old_cache_dir is None: SCREAMING_SNAKE_CASE = old_diffusers_cache SCREAMING_SNAKE_CASE = Path(__SCREAMING_SNAKE_CASE).expanduser() SCREAMING_SNAKE_CASE = Path(__SCREAMING_SNAKE_CASE).expanduser() for old_blob_path in old_cache_dir.glob('**/blobs/*'): if old_blob_path.is_file() and not old_blob_path.is_symlink(): SCREAMING_SNAKE_CASE = new_cache_dir / old_blob_path.relative_to(__SCREAMING_SNAKE_CASE) new_blob_path.parent.mkdir(parents=__SCREAMING_SNAKE_CASE , exist_ok=__SCREAMING_SNAKE_CASE) os.replace(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) try: os.symlink(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) except OSError: logger.warning( 'Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded.') # At this point, old_cache_dir contains symlinks to the new cache (it can still be used). a_ : Optional[int] = os.path.join(DIFFUSERS_CACHE, 'version_diffusers_cache.txt') if not os.path.isfile(cache_version_file): a_ : int = 0 else: with open(cache_version_file) as f: try: a_ : List[Any] = int(f.read()) except ValueError: a_ : Optional[int] = 0 if cache_version < 1: a_ : Optional[Any] = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0 if old_cache_is_not_empty: logger.warning( 'The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your ' 'existing cached models. This is a one-time operation, you can interrupt it or run it ' 'later by calling `diffusers.utils.hub_utils.move_cache()`.' ) try: move_cache() except Exception as e: a_ : Union[str, Any] = '\n'.join(traceback.format_tb(e.__traceback__)) logger.error( f"""There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease """ 'file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole ' 'message and we will do our best to help.' ) if cache_version < 1: try: os.makedirs(DIFFUSERS_CACHE, exist_ok=True) with open(cache_version_file, 'w') as f: f.write('1') except Exception: logger.warning( f"""There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure """ 'the directory exists and can be written to.' ) def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase = None): if variant is not None: SCREAMING_SNAKE_CASE = weights_name.split('.') SCREAMING_SNAKE_CASE = splits[:-1] + [variant] + splits[-1:] SCREAMING_SNAKE_CASE = """.""".join(__SCREAMING_SNAKE_CASE) return weights_name def lowerCamelCase__ (_UpperCAmelCase , *, _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=None , ): SCREAMING_SNAKE_CASE = str(__SCREAMING_SNAKE_CASE) if os.path.isfile(__SCREAMING_SNAKE_CASE): return pretrained_model_name_or_path elif os.path.isdir(__SCREAMING_SNAKE_CASE): if os.path.isfile(os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)): # Load from a PyTorch checkpoint SCREAMING_SNAKE_CASE = os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) return model_file elif subfolder is not None and os.path.isfile( os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)): SCREAMING_SNAKE_CASE = os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) return model_file else: raise EnvironmentError( F'''Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.''') else: # 1. First check if deprecated way of loading from branches is used if ( revision in DEPRECATED_REVISION_ARGS and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME) and version.parse(version.parse(__SCREAMING_SNAKE_CASE).base_version) >= version.parse('0.20.0') ): try: SCREAMING_SNAKE_CASE = hf_hub_download( __SCREAMING_SNAKE_CASE , filename=_add_variant(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) , cache_dir=__SCREAMING_SNAKE_CASE , force_download=__SCREAMING_SNAKE_CASE , proxies=__SCREAMING_SNAKE_CASE , resume_download=__SCREAMING_SNAKE_CASE , local_files_only=__SCREAMING_SNAKE_CASE , use_auth_token=__SCREAMING_SNAKE_CASE , user_agent=__SCREAMING_SNAKE_CASE , subfolder=__SCREAMING_SNAKE_CASE , revision=revision or commit_hash , ) warnings.warn( F'''Loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'` is deprecated. Loading instead from `revision=\'main\'` with `variant={revision}`. Loading model variants via `revision=\'{revision}\'` will be removed in diffusers v1. Please use `variant=\'{revision}\'` instead.''' , __SCREAMING_SNAKE_CASE , ) return model_file except: # noqa: E722 warnings.warn( F'''You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant=\'{revision}\'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)} file in the \'main\' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title \'{pretrained_model_name_or_path} is missing {_add_variant(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)}\' so that the correct variant file can be added.''' , __SCREAMING_SNAKE_CASE , ) try: # 2. Load model file as usual SCREAMING_SNAKE_CASE = hf_hub_download( __SCREAMING_SNAKE_CASE , filename=__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE , force_download=__SCREAMING_SNAKE_CASE , proxies=__SCREAMING_SNAKE_CASE , resume_download=__SCREAMING_SNAKE_CASE , local_files_only=__SCREAMING_SNAKE_CASE , use_auth_token=__SCREAMING_SNAKE_CASE , user_agent=__SCREAMING_SNAKE_CASE , subfolder=__SCREAMING_SNAKE_CASE , revision=revision or commit_hash , ) return model_file except RepositoryNotFoundError: raise EnvironmentError( F'''{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier ''' 'listed on \'https://huggingface.co/models\'\nIf this is a private repository, make sure to pass a ' 'token having permission to this repo with `use_auth_token` or log in with `huggingface-cli ' 'login`.') except RevisionNotFoundError: raise EnvironmentError( F'''{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for ''' 'this model name. Check the model page at ' F'''\'https://huggingface.co/{pretrained_model_name_or_path}\' for available revisions.''') except EntryNotFoundError: raise EnvironmentError( F'''{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.''') except HTTPError as err: raise EnvironmentError( F'''There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}''') except ValueError: raise EnvironmentError( F'''We couldn\'t connect to \'{HUGGINGFACE_CO_RESOLVE_ENDPOINT}\' to load this model, couldn\'t find it''' F''' in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a''' F''' directory containing a file named {weights_name} or''' ' \nCheckout your internet connection or see how to run the library in' ' offline mode at \'https://huggingface.co/docs/diffusers/installation#offline-mode\'.') except EnvironmentError: raise EnvironmentError( F'''Can\'t load the model for \'{pretrained_model_name_or_path}\'. If you were trying to load it from ''' '\'https://huggingface.co/models\', make sure you don\'t have a local directory with the same name. ' F'''Otherwise, make sure \'{pretrained_model_name_or_path}\' is the correct path to a directory ''' F'''containing a file named {weights_name}''')
370
from math import isqrt def lowerCamelCase__ (_UpperCAmelCase): SCREAMING_SNAKE_CASE = [True] * max_number for i in range(2 , isqrt(max_number - 1) + 1): if is_prime[i]: for j in range(i**2 , _UpperCAmelCase , _UpperCAmelCase): SCREAMING_SNAKE_CASE = False return [i for i in range(2 , _UpperCAmelCase) if is_prime[i]] def lowerCamelCase__ (_UpperCAmelCase = 10**8): SCREAMING_SNAKE_CASE = calculate_prime_numbers(max_number // 2) SCREAMING_SNAKE_CASE = 0 SCREAMING_SNAKE_CASE = 0 SCREAMING_SNAKE_CASE = len(_UpperCAmelCase) - 1 while left <= right: while prime_numbers[left] * prime_numbers[right] >= max_number: right -= 1 semiprimes_count += right - left + 1 left += 1 return semiprimes_count if __name__ == "__main__": print(f"""{solution() = }""")
327
0
'''simple docstring''' import argparse import json import os import sys import tempfile import unittest from argparse import Namespace from dataclasses import dataclass, field from enum import Enum from pathlib import Path from typing import List, Literal, Optional import yaml from transformers import HfArgumentParser, TrainingArguments from transformers.hf_argparser import make_choice_type_function, string_to_bool # Since Python 3.10, we can use the builtin `|` operator for Union types # See PEP 604: https://peps.python.org/pep-0604 __snake_case =sys.version_info >= (3, 10) def a_ ( lowerCamelCase : List[Any]=None , lowerCamelCase : Tuple=None ): return field(default_factory=lambda: default , metadata=lowerCamelCase ) @dataclass class UpperCAmelCase_ : lowerCamelCase : int lowerCamelCase : float lowerCamelCase : str lowerCamelCase : bool @dataclass class UpperCAmelCase_ : lowerCamelCase : int = 42 lowerCamelCase : str = field(default='''toto''' , metadata={'''help''': '''help message'''} ) @dataclass class UpperCAmelCase_ : lowerCamelCase : bool = False lowerCamelCase : bool = True lowerCamelCase : Optional[bool] = None class UpperCAmelCase_ ( __lowercase ): lowerCamelCase : List[Any] = '''titi''' lowerCamelCase : List[str] = '''toto''' class UpperCAmelCase_ ( __lowercase ): lowerCamelCase : List[str] = '''titi''' lowerCamelCase : Any = '''toto''' lowerCamelCase : Union[str, Any] = 42 @dataclass class UpperCAmelCase_ : lowerCamelCase : BasicEnum = "toto" def __UpperCAmelCase ( self : Dict ) -> Union[str, Any]: lowerCAmelCase = BasicEnum(self.foo ) @dataclass class UpperCAmelCase_ : lowerCamelCase : MixedTypeEnum = "toto" def __UpperCAmelCase ( self : int ) -> Dict: lowerCAmelCase = MixedTypeEnum(self.foo ) @dataclass class UpperCAmelCase_ : lowerCamelCase : Optional[int] = None lowerCamelCase : Optional[float] = field(default=__lowercase , metadata={'''help''': '''help message'''} ) lowerCamelCase : Optional[str] = None lowerCamelCase : Optional[List[str]] = list_field(default=[] ) lowerCamelCase : Optional[List[int]] = list_field(default=[] ) @dataclass class UpperCAmelCase_ : lowerCamelCase : List[int] = list_field(default=[] ) lowerCamelCase : List[int] = list_field(default=[1, 2, 3] ) lowerCamelCase : List[str] = list_field(default=['''Hallo''', '''Bonjour''', '''Hello'''] ) lowerCamelCase : List[float] = list_field(default=[0.1, 0.2, 0.3] ) @dataclass class UpperCAmelCase_ : lowerCamelCase : List[int] = field() lowerCamelCase : str = field() lowerCamelCase : BasicEnum = field() def __UpperCAmelCase ( self : List[str] ) -> Optional[int]: lowerCAmelCase = BasicEnum(self.required_enum ) @dataclass class UpperCAmelCase_ : lowerCamelCase : int lowerCamelCase : "BasicEnum" = field() lowerCamelCase : "Optional[bool]" = None lowerCamelCase : "str" = field(default='''toto''' , metadata={'''help''': '''help message'''} ) lowerCamelCase : "List[str]" = list_field(default=['''Hallo''', '''Bonjour''', '''Hello'''] ) if is_python_no_less_than_3_10: @dataclass class UpperCAmelCase_ : lowerCamelCase : bool = False lowerCamelCase : bool = True lowerCamelCase : bool | None = None @dataclass class UpperCAmelCase_ : lowerCamelCase : int | None = None lowerCamelCase : float | None = field(default=__lowercase , metadata={'''help''': '''help message'''} ) lowerCamelCase : str | None = None lowerCamelCase : list[str] | None = list_field(default=[] ) lowerCamelCase : list[int] | None = list_field(default=[] ) class UpperCAmelCase_ ( unittest.TestCase ): def __UpperCAmelCase ( self : Any , UpperCAmelCase__ : argparse.ArgumentParser , UpperCAmelCase__ : argparse.ArgumentParser ) -> Dict: self.assertEqual(len(a._actions ) , len(b._actions ) ) for x, y in zip(a._actions , b._actions ): lowerCAmelCase = {k: v for k, v in vars(UpperCAmelCase__ ).items() if k != 'container'} lowerCAmelCase = {k: v for k, v in vars(UpperCAmelCase__ ).items() if k != 'container'} # Choices with mixed type have custom function as "type" # So we need to compare results directly for equality if xx.get('choices' , UpperCAmelCase__ ) and yy.get('choices' , UpperCAmelCase__ ): for expected_choice in yy["choices"] + xx["choices"]: self.assertEqual(xx['type'](UpperCAmelCase__ ) , yy['type'](UpperCAmelCase__ ) ) del xx["type"], yy["type"] self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ ) def __UpperCAmelCase ( self : List[Any] ) -> Tuple: lowerCAmelCase = HfArgumentParser(UpperCAmelCase__ ) lowerCAmelCase = argparse.ArgumentParser() expected.add_argument('--foo' , type=UpperCAmelCase__ , required=UpperCAmelCase__ ) expected.add_argument('--bar' , type=UpperCAmelCase__ , required=UpperCAmelCase__ ) expected.add_argument('--baz' , type=UpperCAmelCase__ , required=UpperCAmelCase__ ) expected.add_argument('--flag' , type=UpperCAmelCase__ , default=UpperCAmelCase__ , const=UpperCAmelCase__ , nargs='?' ) self.argparsersEqual(UpperCAmelCase__ , UpperCAmelCase__ ) lowerCAmelCase = ['--foo', '1', '--baz', 'quux', '--bar', '0.5'] ((lowerCAmelCase) , ) = parser.parse_args_into_dataclasses(UpperCAmelCase__ , look_for_args_file=UpperCAmelCase__ ) self.assertFalse(example.flag ) def __UpperCAmelCase ( self : int ) -> int: lowerCAmelCase = HfArgumentParser(UpperCAmelCase__ ) lowerCAmelCase = argparse.ArgumentParser() expected.add_argument('--foo' , default=4_2 , type=UpperCAmelCase__ ) expected.add_argument('--baz' , default='toto' , type=UpperCAmelCase__ , help='help message' ) self.argparsersEqual(UpperCAmelCase__ , UpperCAmelCase__ ) def __UpperCAmelCase ( self : Any ) -> str: lowerCAmelCase = argparse.ArgumentParser() expected.add_argument('--foo' , type=UpperCAmelCase__ , default=UpperCAmelCase__ , const=UpperCAmelCase__ , nargs='?' ) expected.add_argument('--baz' , type=UpperCAmelCase__ , default=UpperCAmelCase__ , const=UpperCAmelCase__ , nargs='?' ) # A boolean no_* argument always has to come after its "default: True" regular counter-part # and its default must be set to False expected.add_argument('--no_baz' , action='store_false' , default=UpperCAmelCase__ , dest='baz' ) expected.add_argument('--opt' , type=UpperCAmelCase__ , default=UpperCAmelCase__ ) lowerCAmelCase = [WithDefaultBoolExample] if is_python_no_less_than_3_10: dataclass_types.append(UpperCAmelCase__ ) for dataclass_type in dataclass_types: lowerCAmelCase = HfArgumentParser(UpperCAmelCase__ ) self.argparsersEqual(UpperCAmelCase__ , UpperCAmelCase__ ) lowerCAmelCase = parser.parse_args([] ) self.assertEqual(UpperCAmelCase__ , Namespace(foo=UpperCAmelCase__ , baz=UpperCAmelCase__ , opt=UpperCAmelCase__ ) ) lowerCAmelCase = parser.parse_args(['--foo', '--no_baz'] ) self.assertEqual(UpperCAmelCase__ , Namespace(foo=UpperCAmelCase__ , baz=UpperCAmelCase__ , opt=UpperCAmelCase__ ) ) lowerCAmelCase = parser.parse_args(['--foo', '--baz'] ) self.assertEqual(UpperCAmelCase__ , Namespace(foo=UpperCAmelCase__ , baz=UpperCAmelCase__ , opt=UpperCAmelCase__ ) ) lowerCAmelCase = parser.parse_args(['--foo', 'True', '--baz', 'True', '--opt', 'True'] ) self.assertEqual(UpperCAmelCase__ , Namespace(foo=UpperCAmelCase__ , baz=UpperCAmelCase__ , opt=UpperCAmelCase__ ) ) lowerCAmelCase = parser.parse_args(['--foo', 'False', '--baz', 'False', '--opt', 'False'] ) self.assertEqual(UpperCAmelCase__ , Namespace(foo=UpperCAmelCase__ , baz=UpperCAmelCase__ , opt=UpperCAmelCase__ ) ) def __UpperCAmelCase ( self : Dict ) -> Union[str, Any]: lowerCAmelCase = HfArgumentParser(UpperCAmelCase__ ) lowerCAmelCase = argparse.ArgumentParser() expected.add_argument( '--foo' , default='toto' , choices=['titi', 'toto', 4_2] , type=make_choice_type_function(['titi', 'toto', 4_2] ) , ) self.argparsersEqual(UpperCAmelCase__ , UpperCAmelCase__ ) lowerCAmelCase = parser.parse_args([] ) self.assertEqual(args.foo , 'toto' ) lowerCAmelCase = parser.parse_args_into_dataclasses([] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.toto ) lowerCAmelCase = parser.parse_args(['--foo', 'titi'] ) self.assertEqual(args.foo , 'titi' ) lowerCAmelCase = parser.parse_args_into_dataclasses(['--foo', 'titi'] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.titi ) lowerCAmelCase = parser.parse_args(['--foo', '42'] ) self.assertEqual(args.foo , 4_2 ) lowerCAmelCase = parser.parse_args_into_dataclasses(['--foo', '42'] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo ) def __UpperCAmelCase ( self : int ) -> Dict: @dataclass class UpperCAmelCase_ : lowerCamelCase : Literal["titi", "toto", 42] = "toto" lowerCAmelCase = HfArgumentParser(UpperCAmelCase__ ) lowerCAmelCase = argparse.ArgumentParser() expected.add_argument( '--foo' , default='toto' , choices=('titi', 'toto', 4_2) , type=make_choice_type_function(['titi', 'toto', 4_2] ) , ) self.argparsersEqual(UpperCAmelCase__ , UpperCAmelCase__ ) lowerCAmelCase = parser.parse_args([] ) self.assertEqual(args.foo , 'toto' ) lowerCAmelCase = parser.parse_args(['--foo', 'titi'] ) self.assertEqual(args.foo , 'titi' ) lowerCAmelCase = parser.parse_args(['--foo', '42'] ) self.assertEqual(args.foo , 4_2 ) def __UpperCAmelCase ( self : Union[str, Any] ) -> List[Any]: lowerCAmelCase = HfArgumentParser(UpperCAmelCase__ ) lowerCAmelCase = argparse.ArgumentParser() expected.add_argument('--foo_int' , nargs='+' , default=[] , type=UpperCAmelCase__ ) expected.add_argument('--bar_int' , nargs='+' , default=[1, 2, 3] , type=UpperCAmelCase__ ) expected.add_argument('--foo_str' , nargs='+' , default=['Hallo', 'Bonjour', 'Hello'] , type=UpperCAmelCase__ ) expected.add_argument('--foo_float' , nargs='+' , default=[0.1, 0.2, 0.3] , type=UpperCAmelCase__ ) self.argparsersEqual(UpperCAmelCase__ , UpperCAmelCase__ ) lowerCAmelCase = parser.parse_args([] ) self.assertEqual( UpperCAmelCase__ , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['Hallo', 'Bonjour', 'Hello'] , foo_float=[0.1, 0.2, 0.3] ) , ) lowerCAmelCase = parser.parse_args('--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'.split() ) self.assertEqual(UpperCAmelCase__ , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['a', 'b', 'c'] , foo_float=[0.1, 0.7] ) ) def __UpperCAmelCase ( self : Tuple ) -> Union[str, Any]: lowerCAmelCase = argparse.ArgumentParser() expected.add_argument('--foo' , default=UpperCAmelCase__ , type=UpperCAmelCase__ ) expected.add_argument('--bar' , default=UpperCAmelCase__ , type=UpperCAmelCase__ , help='help message' ) expected.add_argument('--baz' , default=UpperCAmelCase__ , type=UpperCAmelCase__ ) expected.add_argument('--ces' , nargs='+' , default=[] , type=UpperCAmelCase__ ) expected.add_argument('--des' , nargs='+' , default=[] , type=UpperCAmelCase__ ) lowerCAmelCase = [OptionalExample] if is_python_no_less_than_3_10: dataclass_types.append(UpperCAmelCase__ ) for dataclass_type in dataclass_types: lowerCAmelCase = HfArgumentParser(UpperCAmelCase__ ) self.argparsersEqual(UpperCAmelCase__ , UpperCAmelCase__ ) lowerCAmelCase = parser.parse_args([] ) self.assertEqual(UpperCAmelCase__ , Namespace(foo=UpperCAmelCase__ , bar=UpperCAmelCase__ , baz=UpperCAmelCase__ , ces=[] , des=[] ) ) lowerCAmelCase = parser.parse_args('--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'.split() ) self.assertEqual(UpperCAmelCase__ , Namespace(foo=1_2 , bar=3.14 , baz='42' , ces=['a', 'b', 'c'] , des=[1, 2, 3] ) ) def __UpperCAmelCase ( self : Any ) -> List[str]: lowerCAmelCase = HfArgumentParser(UpperCAmelCase__ ) lowerCAmelCase = argparse.ArgumentParser() expected.add_argument('--required_list' , nargs='+' , type=UpperCAmelCase__ , required=UpperCAmelCase__ ) expected.add_argument('--required_str' , type=UpperCAmelCase__ , required=UpperCAmelCase__ ) expected.add_argument( '--required_enum' , type=make_choice_type_function(['titi', 'toto'] ) , choices=['titi', 'toto'] , required=UpperCAmelCase__ , ) self.argparsersEqual(UpperCAmelCase__ , UpperCAmelCase__ ) def __UpperCAmelCase ( self : Dict ) -> Optional[Any]: lowerCAmelCase = HfArgumentParser(UpperCAmelCase__ ) lowerCAmelCase = argparse.ArgumentParser() expected.add_argument('--foo' , type=UpperCAmelCase__ , required=UpperCAmelCase__ ) expected.add_argument( '--required_enum' , type=make_choice_type_function(['titi', 'toto'] ) , choices=['titi', 'toto'] , required=UpperCAmelCase__ , ) expected.add_argument('--opt' , type=UpperCAmelCase__ , default=UpperCAmelCase__ ) expected.add_argument('--baz' , default='toto' , type=UpperCAmelCase__ , help='help message' ) expected.add_argument('--foo_str' , nargs='+' , default=['Hallo', 'Bonjour', 'Hello'] , type=UpperCAmelCase__ ) self.argparsersEqual(UpperCAmelCase__ , UpperCAmelCase__ ) def __UpperCAmelCase ( self : Union[str, Any] ) -> Any: lowerCAmelCase = HfArgumentParser(UpperCAmelCase__ ) lowerCAmelCase = { 'foo': 1_2, 'bar': 3.14, 'baz': '42', 'flag': True, } lowerCAmelCase = parser.parse_dict(UpperCAmelCase__ )[0] lowerCAmelCase = BasicExample(**UpperCAmelCase__ ) self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ ) def __UpperCAmelCase ( self : List[Any] ) -> List[Any]: lowerCAmelCase = HfArgumentParser(UpperCAmelCase__ ) lowerCAmelCase = { 'foo': 1_2, 'bar': 3.14, 'baz': '42', 'flag': True, 'extra': 4_2, } self.assertRaises(UpperCAmelCase__ , parser.parse_dict , UpperCAmelCase__ , allow_extra_keys=UpperCAmelCase__ ) def __UpperCAmelCase ( self : int ) -> List[str]: lowerCAmelCase = HfArgumentParser(UpperCAmelCase__ ) lowerCAmelCase = { 'foo': 1_2, 'bar': 3.14, 'baz': '42', 'flag': True, } with tempfile.TemporaryDirectory() as tmp_dir: lowerCAmelCase = os.path.join(UpperCAmelCase__ , 'temp_json' ) os.mkdir(UpperCAmelCase__ ) with open(temp_local_path + '.json' , 'w+' ) as f: json.dump(UpperCAmelCase__ , UpperCAmelCase__ ) lowerCAmelCase = parser.parse_yaml_file(Path(temp_local_path + '.json' ) )[0] lowerCAmelCase = BasicExample(**UpperCAmelCase__ ) self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ ) def __UpperCAmelCase ( self : Optional[int] ) -> List[Any]: lowerCAmelCase = HfArgumentParser(UpperCAmelCase__ ) lowerCAmelCase = { 'foo': 1_2, 'bar': 3.14, 'baz': '42', 'flag': True, } with tempfile.TemporaryDirectory() as tmp_dir: lowerCAmelCase = os.path.join(UpperCAmelCase__ , 'temp_yaml' ) os.mkdir(UpperCAmelCase__ ) with open(temp_local_path + '.yaml' , 'w+' ) as f: yaml.dump(UpperCAmelCase__ , UpperCAmelCase__ ) lowerCAmelCase = parser.parse_yaml_file(Path(temp_local_path + '.yaml' ) )[0] lowerCAmelCase = BasicExample(**UpperCAmelCase__ ) self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ ) def __UpperCAmelCase ( self : Any ) -> int: lowerCAmelCase = HfArgumentParser(UpperCAmelCase__ ) self.assertIsNotNone(UpperCAmelCase__ )
4
# This script creates a super tiny model that is useful inside tests, when we just want to test that # the machinery works, without needing to the check the quality of the outcomes. # # This version creates a tiny model through reduction of a normal pre-trained model, but keeping the # full vocab, merges file, and thus also resulting in a larger model due to a large vocab size. # This gives ~3MB in total for all files. # # If you want a 50 times smaller than this see `fsmt-make-super-tiny-model.py`, which is slightly more complicated # # # It will be used then as "stas/tiny-wmt19-en-de" # Build from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration snake_case : List[str] = "facebook/wmt19-en-de" snake_case : Dict = FSMTTokenizer.from_pretrained(mname) # get the correct vocab sizes, etc. from the master model snake_case : List[str] = FSMTConfig.from_pretrained(mname) config.update( dict( d_model=4, encoder_layers=1, decoder_layers=1, encoder_ffn_dim=4, decoder_ffn_dim=4, encoder_attention_heads=1, decoder_attention_heads=1, ) ) snake_case : int = FSMTForConditionalGeneration(config) print(F"num of params {tiny_model.num_parameters()}") # Test snake_case : Optional[Any] = tokenizer(["Making tiny model"], return_tensors="pt") snake_case : List[str] = tiny_model(**batch) print("test output:", len(outputs.logits[0])) # Save snake_case : Dict = "tiny-wmt19-en-de" tiny_model.half() # makes it smaller tiny_model.save_pretrained(mname_tiny) tokenizer.save_pretrained(mname_tiny) print(F"Generated {mname_tiny}") # Upload # transformers-cli upload tiny-wmt19-en-de
281
0
import unittest import numpy as np import torch from torch import nn from transformers import ( CLIPImageProcessor, CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionConfig, CLIPVisionModelWithProjection, ) from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler from diffusers.utils import torch_device from diffusers.utils.testing_utils import enable_full_determinism, skip_mps from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class lowerCamelCase (SCREAMING_SNAKE_CASE_ , unittest.TestCase ): '''simple docstring''' _snake_case : int = KandinskyVaaPriorPipeline _snake_case : Optional[Any] = ['''prompt'''] _snake_case : List[Any] = ['''prompt''', '''negative_prompt'''] _snake_case : List[Any] = [ '''num_images_per_prompt''', '''generator''', '''num_inference_steps''', '''latents''', '''negative_prompt''', '''guidance_scale''', '''output_type''', '''return_dict''', ] _snake_case : str = False @property def __UpperCAmelCase ( self ) -> List[str]: return 3_2 @property def __UpperCAmelCase ( self ) -> str: return 3_2 @property def __UpperCAmelCase ( self ) -> Optional[int]: return self.time_input_dim @property def __UpperCAmelCase ( self ) -> int: return self.time_input_dim * 4 @property def __UpperCAmelCase ( self ) -> Union[str, Any]: return 1_0_0 @property def __UpperCAmelCase ( self ) -> List[str]: UpperCAmelCase_ : Tuple = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) return tokenizer @property def __UpperCAmelCase ( self ) -> Optional[int]: torch.manual_seed(0 ) UpperCAmelCase_ : Union[str, Any] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , ) return CLIPTextModelWithProjection(__a ) @property def __UpperCAmelCase ( self ) -> Any: torch.manual_seed(0 ) UpperCAmelCase_ : Any = { 'num_attention_heads': 2, 'attention_head_dim': 1_2, 'embedding_dim': self.text_embedder_hidden_size, 'num_layers': 1, } UpperCAmelCase_ : List[Any] = PriorTransformer(**__a ) # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0 UpperCAmelCase_ : Any = nn.Parameter(torch.ones(model.clip_std.shape ) ) return model @property def __UpperCAmelCase ( self ) -> List[str]: torch.manual_seed(0 ) UpperCAmelCase_ : Optional[Any] = CLIPVisionConfig( hidden_size=self.text_embedder_hidden_size , image_size=2_2_4 , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1_4 , ) UpperCAmelCase_ : Optional[Any] = CLIPVisionModelWithProjection(__a ) return model @property def __UpperCAmelCase ( self ) -> List[Any]: UpperCAmelCase_ : Dict = CLIPImageProcessor( crop_size=2_2_4 , do_center_crop=__a , do_normalize=__a , do_resize=__a , image_mean=[0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73] , image_std=[0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11] , resample=3 , size=2_2_4 , ) return image_processor def __UpperCAmelCase ( self ) -> Optional[int]: UpperCAmelCase_ : Tuple = self.dummy_prior UpperCAmelCase_ : List[str] = self.dummy_image_encoder UpperCAmelCase_ : str = self.dummy_text_encoder UpperCAmelCase_ : List[str] = self.dummy_tokenizer UpperCAmelCase_ : List[str] = self.dummy_image_processor UpperCAmelCase_ : Any = UnCLIPScheduler( variance_type='fixed_small_log' , prediction_type='sample' , num_train_timesteps=1_0_0_0 , clip_sample=__a , clip_sample_range=1_0.0 , ) UpperCAmelCase_ : str = { 'prior': prior, 'image_encoder': image_encoder, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'scheduler': scheduler, 'image_processor': image_processor, } return components def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase=0 ) -> Any: if str(__a ).startswith('mps' ): UpperCAmelCase_ : List[str] = torch.manual_seed(__a ) else: UpperCAmelCase_ : List[str] = torch.Generator(device=__a ).manual_seed(__a ) UpperCAmelCase_ : List[Any] = { 'prompt': 'horse', 'generator': generator, 'guidance_scale': 4.0, 'num_inference_steps': 2, 'output_type': 'np', } return inputs def __UpperCAmelCase ( self ) -> Dict: UpperCAmelCase_ : str = 'cpu' UpperCAmelCase_ : List[str] = self.get_dummy_components() UpperCAmelCase_ : Tuple = self.pipeline_class(**__a ) UpperCAmelCase_ : Optional[Any] = pipe.to(__a ) pipe.set_progress_bar_config(disable=__a ) UpperCAmelCase_ : Optional[int] = pipe(**self.get_dummy_inputs(__a ) ) UpperCAmelCase_ : List[str] = output.image_embeds UpperCAmelCase_ : str = pipe( **self.get_dummy_inputs(__a ) , return_dict=__a , )[0] UpperCAmelCase_ : Union[str, Any] = image[0, -1_0:] UpperCAmelCase_ : Any = image_from_tuple[0, -1_0:] assert image.shape == (1, 3_2) UpperCAmelCase_ : List[Any] = np.array( [-0.05_32, 1.71_20, 0.36_56, -1.08_52, -0.89_46, -1.17_56, 0.43_48, 0.24_82, 0.51_46, -0.11_56] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 @skip_mps def __UpperCAmelCase ( self ) -> Optional[int]: UpperCAmelCase_ : Union[str, Any] = torch_device == 'cpu' UpperCAmelCase_ : Dict = True UpperCAmelCase_ : Union[str, Any] = False self._test_inference_batch_single_identical( test_max_difference=__a , relax_max_difference=__a , test_mean_pixel_difference=__a , ) @skip_mps def __UpperCAmelCase ( self ) -> Union[str, Any]: UpperCAmelCase_ : Dict = torch_device == 'cpu' UpperCAmelCase_ : Optional[Any] = False self._test_attention_slicing_forward_pass( test_max_difference=__a , test_mean_pixel_difference=__a , )
350
from __future__ import annotations def lowercase__ ( __snake_case : list[int] , __snake_case : int ): '''simple docstring''' if len(__snake_case ) < k or k < 0: raise ValueError('Invalid Input' ) UpperCAmelCase_ : int = sum(array[:k] ) for i in range(len(__snake_case ) - k ): UpperCAmelCase_ : List[Any] = current_sum - array[i] + array[i + k] UpperCAmelCase_ : List[Any] = max(__snake_case , __snake_case ) return max_sum if __name__ == "__main__": from doctest import testmod from random import randint testmod() __UpperCAmelCase = [randint(-1000, 1000) for i in range(100)] __UpperCAmelCase = randint(0, 110) print(F'The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}')
145
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __SCREAMING_SNAKE_CASE : int = {"""configuration_mbart""": ["""MBART_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MBartConfig""", """MBartOnnxConfig"""]} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE : Tuple = ["""MBartTokenizer"""] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE : int = ["""MBartTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE : Optional[int] = [ """MBART_PRETRAINED_MODEL_ARCHIVE_LIST""", """MBartForCausalLM""", """MBartForConditionalGeneration""", """MBartForQuestionAnswering""", """MBartForSequenceClassification""", """MBartModel""", """MBartPreTrainedModel""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE : Tuple = [ """TFMBartForConditionalGeneration""", """TFMBartModel""", """TFMBartPreTrainedModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE : Optional[Any] = [ """FlaxMBartForConditionalGeneration""", """FlaxMBartForQuestionAnswering""", """FlaxMBartForSequenceClassification""", """FlaxMBartModel""", """FlaxMBartPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mbart import MBartTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mbart_fast import MBartTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mbart import ( MBART_PRETRAINED_MODEL_ARCHIVE_LIST, MBartForCausalLM, MBartForConditionalGeneration, MBartForQuestionAnswering, MBartForSequenceClassification, MBartModel, MBartPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_mbart import ( FlaxMBartForConditionalGeneration, FlaxMBartForQuestionAnswering, FlaxMBartForSequenceClassification, FlaxMBartModel, FlaxMBartPreTrainedModel, ) else: import sys __SCREAMING_SNAKE_CASE : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
347
'''simple docstring''' from operator import delitem, getitem, setitem import pytest from data_structures.hashing.hash_map import HashMap def __lowerCamelCase ( _lowercase ) -> Optional[Any]: return getitem, k def __lowerCamelCase ( _lowercase , _lowercase ) -> List[str]: return setitem, k, v def __lowerCamelCase ( _lowercase ) -> int: return delitem, k def __lowerCamelCase ( _lowercase , _lowercase , *_lowercase ) -> Optional[Any]: try: return fun(_lowercase , *_lowercase ), None except Exception as e: return None, e a : List[str] = ( _set("""key_a""", """val_a"""), _set("""key_b""", """val_b"""), ) a : List[Any] = [ _set("""key_a""", """val_a"""), _set("""key_a""", """val_b"""), ] a : int = [ _set("""key_a""", """val_a"""), _set("""key_b""", """val_b"""), _del("""key_a"""), _del("""key_b"""), _set("""key_a""", """val_a"""), _del("""key_a"""), ] a : List[Any] = [ _get("""key_a"""), _del("""key_a"""), _set("""key_a""", """val_a"""), _del("""key_a"""), _del("""key_a"""), _get("""key_a"""), ] a : Tuple = [ *[_set(x, x) for x in range(5)], # guaranteed upsize ] a : Optional[Any] = [ *[_set(x, x) for x in range(5)], # guaranteed upsize *[_del(x) for x in range(5)], _set("""key_a""", """val_b"""), ] @pytest.mark.parametrize( """operations""" , ( pytest.param(_add_items , id="""add items""" ), pytest.param(_overwrite_items , id="""overwrite items""" ), pytest.param(_delete_items , id="""delete items""" ), pytest.param(_access_absent_items , id="""access absent items""" ), pytest.param(_add_with_resize_up , id="""add with resize up""" ), pytest.param(_add_with_resize_down , id="""add with resize down""" ), ) , ) def __lowerCamelCase ( _lowercase ) -> Optional[int]: UpperCAmelCase : List[str] = HashMap(initial_block_size=4 ) UpperCAmelCase : Dict = {} for _, (fun, *args) in enumerate(_lowercase ): UpperCAmelCase , UpperCAmelCase : Union[str, Any] = _run_operation(_lowercase , _lowercase , *_lowercase ) UpperCAmelCase , UpperCAmelCase : Any = _run_operation(_lowercase , _lowercase , *_lowercase ) assert my_res == py_res assert str(_lowercase ) == str(_lowercase ) assert set(_lowercase ) == set(_lowercase ) assert len(_lowercase ) == len(_lowercase ) assert set(my.items() ) == set(py.items() ) def __lowerCamelCase ( ) -> List[Any]: def is_public(_lowercase ) -> bool: return not name.startswith("""_""" ) UpperCAmelCase : int = {name for name in dir({} ) if is_public(_lowercase )} UpperCAmelCase : Any = {name for name in dir(HashMap() ) if is_public(_lowercase )} assert dict_public_names > hash_public_names
265
0
from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices A__ = logging.get_logger(__name__) A__ = { '''shi-labs/dinat-mini-in1k-224''': '''https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json''', # See all Dinat models at https://huggingface.co/models?filter=dinat } class a ( __lowerCamelCase , __lowerCamelCase ): __lowerCAmelCase : Any = """dinat""" __lowerCAmelCase : Dict = { """num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers""", } def __init__( self :Dict ,__lowercase :Optional[int]=4 ,__lowercase :Union[str, Any]=3 ,__lowercase :List[Any]=6_4 ,__lowercase :int=[3, 4, 6, 5] ,__lowercase :str=[2, 4, 8, 1_6] ,__lowercase :List[str]=7 ,__lowercase :List[str]=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]] ,__lowercase :Union[str, Any]=3.0 ,__lowercase :Optional[int]=True ,__lowercase :Optional[Any]=0.0 ,__lowercase :Optional[int]=0.0 ,__lowercase :Any=0.1 ,__lowercase :List[Any]="gelu" ,__lowercase :int=0.02 ,__lowercase :Dict=1e-5 ,__lowercase :List[Any]=0.0 ,__lowercase :Dict=None ,__lowercase :Optional[int]=None ,**__lowercase :Dict ,): super().__init__(**__lowercase ) snake_case__ : str = patch_size snake_case__ : int = num_channels snake_case__ : List[str] = embed_dim snake_case__ : Any = depths snake_case__ : Optional[int] = len(__lowercase ) snake_case__ : Any = num_heads snake_case__ : Dict = kernel_size snake_case__ : Union[str, Any] = dilations snake_case__ : int = mlp_ratio snake_case__ : Any = qkv_bias snake_case__ : Dict = hidden_dropout_prob snake_case__ : Tuple = attention_probs_dropout_prob snake_case__ : str = drop_path_rate snake_case__ : str = hidden_act snake_case__ : Optional[Any] = layer_norm_eps snake_case__ : Tuple = initializer_range # we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model snake_case__ : Any = int(embed_dim * 2 ** (len(__lowercase ) - 1) ) snake_case__ : Tuple = layer_scale_init_value snake_case__ : Optional[Any] = ['''stem'''] + [F"""stage{idx}""" for idx in range(1 ,len(__lowercase ) + 1 )] snake_case__ : Tuple = get_aligned_output_features_output_indices( out_features=__lowercase ,out_indices=__lowercase ,stage_names=self.stage_names )
369
def _lowerCAmelCase ( __lowerCAmelCase , __lowerCAmelCase ) -> bool: """simple docstring""" snake_case__ : Optional[int] = len(__lowerCAmelCase ) + 1 snake_case__ : Tuple = len(__lowerCAmelCase ) + 1 # dp is a 2d matrix where dp[i][j] denotes whether prefix string of # length i of input_string matches with prefix string of length j of # given pattern. # "dp" stands for dynamic programming. snake_case__ : str = [[0 for i in range(__lowerCAmelCase )] for j in range(__lowerCAmelCase )] # since string of zero length match pattern of zero length snake_case__ : int = 1 # since pattern of zero length will never match with string of non-zero length for i in range(1 , __lowerCAmelCase ): snake_case__ : Dict = 0 # since string of zero length will match with pattern where there # is at least one * alternatively for j in range(1 , __lowerCAmelCase ): snake_case__ : str = dp[0][j - 2] if pattern[j - 1] == '''*''' else 0 # now using bottom-up approach to find for all remaining lengths for i in range(1 , __lowerCAmelCase ): for j in range(1 , __lowerCAmelCase ): if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".": snake_case__ : Dict = dp[i - 1][j - 1] elif pattern[j - 1] == "*": if dp[i][j - 2] == 1: snake_case__ : Union[str, Any] = 1 elif pattern[j - 2] in (input_string[i - 1], "."): snake_case__ : List[str] = dp[i - 1][j] else: snake_case__ : Union[str, Any] = 0 else: snake_case__ : Tuple = 0 return bool(dp[-1][-1] ) if __name__ == "__main__": import doctest doctest.testmod() # inputing the strings # input_string = input("input a string :") # pattern = input("input a pattern :") A__ = '''aab''' A__ = '''c*a*b''' # using function to check whether given string matches the given pattern if match_pattern(input_string, pattern): print(f"""{input_string} matches the given pattern {pattern}""") else: print(f"""{input_string} does not match with the given pattern {pattern}""")
44
0
import os from itertools import chain from random import randrange, shuffle import pytest from .sola import PokerHand lowerCamelCase__ = ( """4S 3H 2C 7S 5H""", """9D 8H 2C 6S 7H""", """2D 6D 9D TH 7D""", """TC 8C 2S JH 6C""", """JH 8S TH AH QH""", """TS KS 5S 9S AC""", """KD 6S 9D TH AD""", """KS 8D 4D 9S 4S""", # pair """8C 4S KH JS 4D""", # pair """QH 8H KD JH 8S""", # pair """KC 4H KS 2H 8D""", # pair """KD 4S KC 3H 8S""", # pair """AH 8S AS KC JH""", # pair """3H 4C 4H 3S 2H""", # 2 pairs """5S 5D 2C KH KH""", # 2 pairs """3C KH 5D 5S KH""", # 2 pairs """AS 3C KH AD KH""", # 2 pairs """7C 7S 3S 7H 5S""", # 3 of a kind """7C 7S KH 2H 7H""", # 3 of a kind """AC KH QH AH AS""", # 3 of a kind """2H 4D 3C AS 5S""", # straight (low ace) """3C 5C 4C 2C 6H""", # straight """6S 8S 7S 5H 9H""", # straight """JS QS 9H TS KH""", # straight """QC KH TS JS AH""", # straight (high ace) """8C 9C 5C 3C TC""", # flush """3S 8S 9S 5S KS""", # flush """4C 5C 9C 8C KC""", # flush """JH 8H AH KH QH""", # flush """3D 2H 3H 2C 2D""", # full house """2H 2C 3S 3H 3D""", # full house """KH KC 3S 3H 3D""", # full house """JC 6H JS JD JH""", # 4 of a kind """JC 7H JS JD JH""", # 4 of a kind """JC KH JS JD JH""", # 4 of a kind """2S AS 4S 5S 3S""", # straight flush (low ace) """2D 6D 3D 4D 5D""", # straight flush """5C 6C 3C 7C 4C""", # straight flush """JH 9H TH KH QH""", # straight flush """JH AH TH KH QH""", # royal flush (high ace straight flush) ) lowerCamelCase__ = ( ("""2H 3H 4H 5H 6H""", """KS AS TS QS JS""", """Loss"""), ("""2H 3H 4H 5H 6H""", """AS AD AC AH JD""", """Win"""), ("""AS AH 2H AD AC""", """JS JD JC JH 3D""", """Win"""), ("""2S AH 2H AS AC""", """JS JD JC JH AD""", """Loss"""), ("""2S AH 2H AS AC""", """2H 3H 5H 6H 7H""", """Win"""), ("""AS 3S 4S 8S 2S""", """2H 3H 5H 6H 7H""", """Win"""), ("""2H 3H 5H 6H 7H""", """2S 3H 4H 5S 6C""", """Win"""), ("""2S 3H 4H 5S 6C""", """3D 4C 5H 6H 2S""", """Tie"""), ("""2S 3H 4H 5S 6C""", """AH AC 5H 6H AS""", """Win"""), ("""2S 2H 4H 5S 4C""", """AH AC 5H 6H AS""", """Loss"""), ("""2S 2H 4H 5S 4C""", """AH AC 5H 6H 7S""", """Win"""), ("""6S AD 7H 4S AS""", """AH AC 5H 6H 7S""", """Loss"""), ("""2S AH 4H 5S KC""", """AH AC 5H 6H 7S""", """Loss"""), ("""2S 3H 6H 7S 9C""", """7H 3C TH 6H 9S""", """Loss"""), ("""4S 5H 6H TS AC""", """3S 5H 6H TS AC""", """Win"""), ("""2S AH 4H 5S 6C""", """AD 4C 5H 6H 2C""", """Tie"""), ("""AS AH 3H AD AC""", """AS AH 2H AD AC""", """Win"""), ("""AH AC 5H 5C QS""", """AH AC 5H 5C KS""", """Loss"""), ("""AH AC 5H 5C QS""", """KH KC 5H 5C QS""", """Win"""), ("""7C 7S KH 2H 7H""", """3C 3S AH 2H 3H""", """Win"""), ("""3C 3S AH 2H 3H""", """7C 7S KH 2H 7H""", """Loss"""), ("""6H 5H 4H 3H 2H""", """5H 4H 3H 2H AH""", """Win"""), ("""5H 4H 3H 2H AH""", """5H 4H 3H 2H AH""", """Tie"""), ("""5H 4H 3H 2H AH""", """6H 5H 4H 3H 2H""", """Loss"""), ("""AH AD KS KC AC""", """AH KD KH AC KC""", """Win"""), ("""2H 4D 3C AS 5S""", """2H 4D 3C 6S 5S""", """Loss"""), ("""2H 3S 3C 3H 2S""", """3S 3C 2S 2H 2D""", """Win"""), ("""4D 6D 5D 2D JH""", """3S 8S 3H TC KH""", """Loss"""), ("""4S 6C 8S 3S 7S""", """AD KS 2D 7D 7C""", """Loss"""), ("""6S 4C 7H 8C 3H""", """5H JC AH 9D 9C""", """Loss"""), ("""9D 9H JH TC QH""", """3C 2S JS 5C 7H""", """Win"""), ("""2H TC 8S AD 9S""", """4H TS 7H 2C 5C""", """Win"""), ("""9D 3S 2C 7S 7C""", """JC TD 3C TC 9H""", """Loss"""), ) lowerCamelCase__ = ( ("""2H 3H 4H 5H 6H""", True), ("""AS AH 2H AD AC""", False), ("""2H 3H 5H 6H 7H""", True), ("""KS AS TS QS JS""", True), ("""8H 9H QS JS TH""", False), ("""AS 3S 4S 8S 2S""", True), ) lowerCamelCase__ = ( ("""2H 3H 4H 5H 6H""", True), ("""AS AH 2H AD AC""", False), ("""2H 3H 5H 6H 7H""", False), ("""KS AS TS QS JS""", True), ("""8H 9H QS JS TH""", True), ) lowerCamelCase__ = ( ("""2H 4D 3C AS 5S""", True, [5, 4, 3, 2, 14]), ("""2H 5D 3C AS 5S""", False, [14, 5, 5, 3, 2]), ("""JH QD KC AS TS""", False, [14, 13, 12, 11, 10]), ("""9D 3S 2C 7S 7C""", False, [9, 7, 7, 3, 2]), ) lowerCamelCase__ = ( ("""JH AH TH KH QH""", 0), ("""JH 9H TH KH QH""", 0), ("""JC KH JS JD JH""", 7), ("""KH KC 3S 3H 3D""", 6), ("""8C 9C 5C 3C TC""", 0), ("""JS QS 9H TS KH""", 0), ("""7C 7S KH 2H 7H""", 3), ("""3C KH 5D 5S KH""", 2), ("""QH 8H KD JH 8S""", 1), ("""2D 6D 9D TH 7D""", 0), ) lowerCamelCase__ = ( ("""JH AH TH KH QH""", 23), ("""JH 9H TH KH QH""", 22), ("""JC KH JS JD JH""", 21), ("""KH KC 3S 3H 3D""", 20), ("""8C 9C 5C 3C TC""", 19), ("""JS QS 9H TS KH""", 18), ("""7C 7S KH 2H 7H""", 17), ("""3C KH 5D 5S KH""", 16), ("""QH 8H KD JH 8S""", 15), ("""2D 6D 9D TH 7D""", 14), ) def lowerCAmelCase__ ( ): """simple docstring""" __a = randrange(len(UpperCamelCase__ ) ), randrange(len(UpperCamelCase__ ) ) __a = ["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)] __a = SORTED_HANDS[play], SORTED_HANDS[oppo] return hand, other, expected def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : int = 100 ): """simple docstring""" return (generate_random_hand() for _ in range(UpperCamelCase__ )) @pytest.mark.parametrize("""hand, expected""" , UpperCamelCase__ ) def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Optional[int] ): """simple docstring""" assert PokerHand(UpperCamelCase__ )._is_flush() == expected @pytest.mark.parametrize("""hand, expected""" , UpperCamelCase__ ) def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : List[str] ): """simple docstring""" assert PokerHand(UpperCamelCase__ )._is_straight() == expected @pytest.mark.parametrize("""hand, expected, card_values""" , UpperCamelCase__ ) def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : List[Any] ): """simple docstring""" __a = PokerHand(UpperCamelCase__ ) assert player._is_five_high_straight() == expected assert player._card_values == card_values @pytest.mark.parametrize("""hand, expected""" , UpperCamelCase__ ) def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Tuple ): """simple docstring""" assert PokerHand(UpperCamelCase__ )._is_same_kind() == expected @pytest.mark.parametrize("""hand, expected""" , UpperCamelCase__ ) def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Union[str, Any] ): """simple docstring""" assert PokerHand(UpperCamelCase__ )._hand_type == expected @pytest.mark.parametrize("""hand, other, expected""" , UpperCamelCase__ ) def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Optional[Any] ): """simple docstring""" assert PokerHand(UpperCamelCase__ ).compare_with(PokerHand(UpperCamelCase__ ) ) == expected @pytest.mark.parametrize("""hand, other, expected""" , generate_random_hands() ) def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Any ): """simple docstring""" assert PokerHand(UpperCamelCase__ ).compare_with(PokerHand(UpperCamelCase__ ) ) == expected def lowerCAmelCase__ ( ): """simple docstring""" __a = [PokerHand(UpperCamelCase__ ) for hand in SORTED_HANDS] __a = poker_hands.copy() shuffle(UpperCamelCase__ ) __a = chain(sorted(UpperCamelCase__ ) ) for index, hand in enumerate(UpperCamelCase__ ): assert hand == poker_hands[index] def lowerCAmelCase__ ( ): """simple docstring""" __a = [PokerHand("""2D AC 3H 4H 5S""" ), PokerHand("""2S 3H 4H 5S 6C""" )] pokerhands.sort(reverse=UpperCamelCase__ ) assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C" def lowerCAmelCase__ ( ): """simple docstring""" __a = PokerHand("""2C 4S AS 3D 5C""" ) __a = True __a = [5, 4, 3, 2, 14] for _ in range(10 ): assert pokerhand._is_five_high_straight() == expected assert pokerhand._card_values == expected_card_values def lowerCAmelCase__ ( ): """simple docstring""" __a = 0 __a = os.path.abspath(os.path.dirname(UpperCamelCase__ ) ) __a = os.path.join(UpperCamelCase__ , """poker_hands.txt""" ) with open(UpperCamelCase__ ) as file_hand: for line in file_hand: __a = line[:14].strip() __a = line[15:].strip() __a = PokerHand(UpperCamelCase__ ), PokerHand(UpperCamelCase__ ) __a = player.compare_with(UpperCamelCase__ ) if output == "Win": answer += 1 assert answer == 376
302
from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = { 'facebook/vit-mae-base': 'https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json', # See all ViT MAE models at https://huggingface.co/models?filter=vit-mae } class lowerCAmelCase__ ( a): '''simple docstring''' __SCREAMING_SNAKE_CASE = "vit_mae" def __init__( self , __lowerCamelCase=7_6_8 , __lowerCamelCase=1_2 , __lowerCamelCase=1_2 , __lowerCamelCase=3_0_7_2 , __lowerCamelCase="gelu" , __lowerCamelCase=0.0 , __lowerCamelCase=0.0 , __lowerCamelCase=0.0_2 , __lowerCamelCase=1e-12 , __lowerCamelCase=2_2_4 , __lowerCamelCase=1_6 , __lowerCamelCase=3 , __lowerCamelCase=True , __lowerCamelCase=1_6 , __lowerCamelCase=5_1_2 , __lowerCamelCase=8 , __lowerCamelCase=2_0_4_8 , __lowerCamelCase=0.7_5 , __lowerCamelCase=False , **__lowerCamelCase , ) -> int: super().__init__(**__lowerCamelCase) _A : int = hidden_size _A : List[str] = num_hidden_layers _A : List[Any] = num_attention_heads _A : Optional[Any] = intermediate_size _A : Optional[int] = hidden_act _A : List[Any] = hidden_dropout_prob _A : List[Any] = attention_probs_dropout_prob _A : Union[str, Any] = initializer_range _A : str = layer_norm_eps _A : Any = image_size _A : int = patch_size _A : int = num_channels _A : Dict = qkv_bias _A : Tuple = decoder_num_attention_heads _A : Tuple = decoder_hidden_size _A : List[str] = decoder_num_hidden_layers _A : Optional[Any] = decoder_intermediate_size _A : List[str] = mask_ratio _A : Union[str, Any] = norm_pix_loss
11
0
"""simple docstring""" def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : int = 1000 ): return sum(2 * a * ((a - 1) // 2) for a in range(3 , n + 1 ) ) if __name__ == "__main__": print(solution())
309
"""simple docstring""" def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list[int] , _UpperCAmelCase : str ): lowerCAmelCase = int(_UpperCAmelCase ) # Initialize Result lowerCAmelCase = [] # Traverse through all denomination for denomination in reversed(_UpperCAmelCase ): # Find denominations while int(_UpperCAmelCase ) >= int(_UpperCAmelCase ): total_value -= int(_UpperCAmelCase ) answer.append(_UpperCAmelCase ) # Append the "answers" array return answer # Driver Code if __name__ == "__main__": __UpperCamelCase : Any = [] __UpperCamelCase : List[Any] = '''0''' if ( input('''Do you want to enter your denominations ? (yY/n): ''').strip().lower() == "y" ): __UpperCamelCase : Any = int(input('''Enter the number of denominations you want to add: ''').strip()) for i in range(0, n): denominations.append(int(input(f'''Denomination {i}: ''').strip())) __UpperCamelCase : int = input('''Enter the change you want to make in Indian Currency: ''').strip() else: # All denominations of Indian Currency if user does not enter __UpperCamelCase : List[str] = [1, 2, 5, 10, 20, 50, 100, 500, 2000] __UpperCamelCase : Any = input('''Enter the change you want to make: ''').strip() if int(value) == 0 or int(value) < 0: print('''The total value cannot be zero or negative.''') else: print(f'''Following is minimal change for {value}: ''') __UpperCamelCase : List[str] = find_minimum_change(denominations, value) # Print result for i in range(len(answer)): print(answer[i], end=''' ''')
309
1
def a ( snake_case__: list ): '''simple docstring''' if len(snake_case__ ) <= 1: return [tuple(snake_case__ )] lowercase_ = [] def generate(snake_case__: int , snake_case__: list ): if k == 1: res.append(tuple(arr[:] ) ) return generate(k - 1 , snake_case__ ) for i in range(k - 1 ): if k % 2 == 0: # k is even lowercase_ , lowercase_ = arr[k - 1], arr[i] else: # k is odd lowercase_ , lowercase_ = arr[k - 1], arr[0] generate(k - 1 , snake_case__ ) generate(len(snake_case__ ) , snake_case__ ) return res if __name__ == "__main__": __a = input('Enter numbers separated by a comma:\n').strip() __a = [int(item) for item in user_input.split(',')] print(heaps(arr))
30
"""simple docstring""" import argparse import collections import os import re from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_table.py UpperCAmelCase_ : Optional[int] = """src/transformers""" UpperCAmelCase_ : Tuple = """docs/source/en""" UpperCAmelCase_ : Optional[Any] = """.""" def _A (__a , __a , __a ) -> Dict: """simple docstring""" with open(__a , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: SCREAMING_SNAKE_CASE_ : Dict = f.readlines() # Find the start prompt. SCREAMING_SNAKE_CASE_ : List[Any] = 0 while not lines[start_index].startswith(__a ): start_index += 1 start_index += 1 SCREAMING_SNAKE_CASE_ : Tuple = start_index while not lines[end_index].startswith(__a ): end_index += 1 end_index -= 1 while len(lines[start_index] ) <= 1: start_index += 1 while len(lines[end_index] ) <= 1: end_index -= 1 end_index += 1 return "".join(lines[start_index:end_index] ), start_index, end_index, lines # Add here suffixes that are used to identify models, separated by | UpperCAmelCase_ : Optional[Any] = """Model|Encoder|Decoder|ForConditionalGeneration""" # Regexes that match TF/Flax/PT model names. UpperCAmelCase_ : int = re.compile(r"""TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""") UpperCAmelCase_ : Dict = re.compile(r"""Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""") # Will match any TF or Flax model too so need to be in an else branch afterthe two previous regexes. UpperCAmelCase_ : int = re.compile(r"""(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""") # This is to make sure the transformers module imported is the one in the repo. UpperCAmelCase_ : Optional[int] = direct_transformers_import(TRANSFORMERS_PATH) def _A (__a ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE_ : Dict = re.finditer('''.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)''' , __a ) return [m.group(0 ) for m in matches] def _A (__a , __a ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE_ : List[Any] = 2 if text == '''✅''' or text == '''❌''' else len(__a ) SCREAMING_SNAKE_CASE_ : Tuple = (width - text_length) // 2 SCREAMING_SNAKE_CASE_ : Tuple = width - text_length - left_indent return " " * left_indent + text + " " * right_indent def _A () -> int: """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[int] = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES SCREAMING_SNAKE_CASE_ : Tuple = { name: config_maping_names[code] for code, name in transformers_module.MODEL_NAMES_MAPPING.items() if code in config_maping_names } SCREAMING_SNAKE_CASE_ : List[Any] = {name: config.replace('''Config''' , '''''' ) for name, config in model_name_to_config.items()} # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax. SCREAMING_SNAKE_CASE_ : List[str] = collections.defaultdict(__a ) SCREAMING_SNAKE_CASE_ : int = collections.defaultdict(__a ) SCREAMING_SNAKE_CASE_ : List[str] = collections.defaultdict(__a ) SCREAMING_SNAKE_CASE_ : Optional[int] = collections.defaultdict(__a ) SCREAMING_SNAKE_CASE_ : int = collections.defaultdict(__a ) # Let's lookup through all transformers object (once). for attr_name in dir(__a ): SCREAMING_SNAKE_CASE_ : Any = None if attr_name.endswith('''Tokenizer''' ): SCREAMING_SNAKE_CASE_ : Dict = slow_tokenizers SCREAMING_SNAKE_CASE_ : Dict = attr_name[:-9] elif attr_name.endswith('''TokenizerFast''' ): SCREAMING_SNAKE_CASE_ : Optional[Any] = fast_tokenizers SCREAMING_SNAKE_CASE_ : Optional[Any] = attr_name[:-13] elif _re_tf_models.match(__a ) is not None: SCREAMING_SNAKE_CASE_ : int = tf_models SCREAMING_SNAKE_CASE_ : Dict = _re_tf_models.match(__a ).groups()[0] elif _re_flax_models.match(__a ) is not None: SCREAMING_SNAKE_CASE_ : Any = flax_models SCREAMING_SNAKE_CASE_ : Tuple = _re_flax_models.match(__a ).groups()[0] elif _re_pt_models.match(__a ) is not None: SCREAMING_SNAKE_CASE_ : str = pt_models SCREAMING_SNAKE_CASE_ : int = _re_pt_models.match(__a ).groups()[0] if lookup_dict is not None: while len(__a ) > 0: if attr_name in model_name_to_prefix.values(): SCREAMING_SNAKE_CASE_ : List[str] = True break # Try again after removing the last word in the name SCREAMING_SNAKE_CASE_ : Optional[int] = ''''''.join(camel_case_split(__a )[:-1] ) # Let's build that table! SCREAMING_SNAKE_CASE_ : Any = list(model_name_to_config.keys() ) model_names.sort(key=str.lower ) SCREAMING_SNAKE_CASE_ : Any = ['''Model''', '''Tokenizer slow''', '''Tokenizer fast''', '''PyTorch support''', '''TensorFlow support''', '''Flax Support'''] # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side). SCREAMING_SNAKE_CASE_ : List[str] = [len(__a ) + 2 for c in columns] SCREAMING_SNAKE_CASE_ : str = max([len(__a ) for name in model_names] ) + 2 # Build the table per se SCREAMING_SNAKE_CASE_ : List[Any] = '''|''' + '''|'''.join([_center_text(__a , __a ) for c, w in zip(__a , __a )] ) + '''|\n''' # Use ":-----:" format to center-aligned table cell texts table += "|" + "|".join([''':''' + '''-''' * (w - 2) + ''':''' for w in widths] ) + "|\n" SCREAMING_SNAKE_CASE_ : Union[str, Any] = {True: '''✅''', False: '''❌'''} for name in model_names: SCREAMING_SNAKE_CASE_ : str = model_name_to_prefix[name] SCREAMING_SNAKE_CASE_ : int = [ name, check[slow_tokenizers[prefix]], check[fast_tokenizers[prefix]], check[pt_models[prefix]], check[tf_models[prefix]], check[flax_models[prefix]], ] table += "|" + "|".join([_center_text(__a , __a ) for l, w in zip(__a , __a )] ) + "|\n" return table def _A (__a=False ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[Any] = _find_text_in_file( filename=os.path.join(__a , '''index.md''' ) , start_prompt='''<!--This table is updated automatically from the auto modules''' , end_prompt='''<!-- End table-->''' , ) SCREAMING_SNAKE_CASE_ : Tuple = get_model_table_from_auto_modules() if current_table != new_table: if overwrite: with open(os.path.join(__a , '''index.md''' ) , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f: f.writelines(lines[:start_index] + [new_table] + lines[end_index:] ) else: raise ValueError( '''The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this.''' ) if __name__ == "__main__": UpperCAmelCase_ : Union[str, Any] = argparse.ArgumentParser() parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""") UpperCAmelCase_ : Any = parser.parse_args() check_model_table(args.fix_and_overwrite)
91
0
"""simple docstring""" import os import pytest from attr import dataclass UpperCAmelCase = '''us-east-1''' # defaults region @dataclass class __magic_name__ : __A : str __A : Tuple = "arn:aws:iam::558105141721:role/sagemaker_execution_role" __A : List[Any] = { "task_name": "mnli", "per_device_train_batch_size": 16, "per_device_eval_batch_size": 16, "do_train": True, "do_eval": True, "do_predict": True, "output_dir": "/opt/ml/model", "overwrite_output_dir": True, "max_steps": 5_00, "save_steps": 55_00, } __A : Union[str, Any] = {**hyperparameters, "max_steps": 10_00} @property def __snake_case ( self : Union[str, Any] ): '''simple docstring''' if self.framework == "pytorch": return [ {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"}, {"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"}, {"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"}, ] else: return [ {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"}, {"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"}, {"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"}, ] @property def __snake_case ( self : str ): '''simple docstring''' return f"""{self.framework}-transfromers-test""" @property def __snake_case ( self : Dict ): '''simple docstring''' return f"""./tests/sagemaker/scripts/{self.framework}""" @property def __snake_case ( self : List[str] ): '''simple docstring''' if self.framework == "pytorch": return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04" else: return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04" @pytest.fixture(scope='''class''') def lowerCamelCase (a_ :int) -> Dict: lowercase :Optional[int] = SageMakerTestEnvironment(framework=request.cls.framework)
358
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase = logging.get_logger(__name__) UpperCAmelCase = { '''caidas/swin2sr-classicalsr-x2-64''': ( '''https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json''' ), } class __magic_name__ ( __UpperCAmelCase ): __A : Tuple = "swin2sr" __A : Dict = { "hidden_size": "embed_dim", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers", } def __init__( self : List[str] , snake_case__ : List[str]=6_4 , snake_case__ : Union[str, Any]=1 , snake_case__ : Tuple=3 , snake_case__ : int=1_8_0 , snake_case__ : Union[str, Any]=[6, 6, 6, 6, 6, 6] , snake_case__ : List[str]=[6, 6, 6, 6, 6, 6] , snake_case__ : Tuple=8 , snake_case__ : List[Any]=2.0 , snake_case__ : Any=True , snake_case__ : Dict=0.0 , snake_case__ : Dict=0.0 , snake_case__ : Dict=0.1 , snake_case__ : Dict="gelu" , snake_case__ : Optional[int]=False , snake_case__ : Any=0.02 , snake_case__ : Any=1e-5 , snake_case__ : Optional[int]=2 , snake_case__ : Optional[int]=1.0 , snake_case__ : Optional[Any]="1conv" , snake_case__ : List[str]="pixelshuffle" , **snake_case__ : Tuple , ): '''simple docstring''' super().__init__(**snake_case__ ) lowercase :Dict = image_size lowercase :List[str] = patch_size lowercase :Tuple = num_channels lowercase :int = embed_dim lowercase :Any = depths lowercase :Union[str, Any] = len(snake_case__ ) lowercase :List[str] = num_heads lowercase :int = window_size lowercase :Tuple = mlp_ratio lowercase :List[Any] = qkv_bias lowercase :Optional[int] = hidden_dropout_prob lowercase :Tuple = attention_probs_dropout_prob lowercase :Tuple = drop_path_rate lowercase :Optional[Any] = hidden_act lowercase :Union[str, Any] = use_absolute_embeddings lowercase :Dict = layer_norm_eps lowercase :Optional[Any] = initializer_range lowercase :Optional[Any] = upscale lowercase :Any = img_range lowercase :Optional[int] = resi_connection lowercase :Union[str, Any] = upsampler
172
0
'''simple docstring''' from typing import List, Union import numpy as np from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING lowerCamelCase_ = logging.get_logger(__name__) @add_end_docstrings(snake_case_ ) class _UpperCAmelCase ( snake_case_ ): """simple docstring""" def __init__( self : List[str] , *__UpperCAmelCase : str , **__UpperCAmelCase : Optional[int] ): '''simple docstring''' super().__init__(*__UpperCAmelCase , **__UpperCAmelCase ) requires_backends(self , "vision" ) self.check_model_type(__UpperCAmelCase ) def __call__( self : List[Any] , __UpperCAmelCase : Union[str, List[str], "Image.Image", List["Image.Image"]] , **__UpperCAmelCase : Any ): '''simple docstring''' return super().__call__(__UpperCAmelCase , **__UpperCAmelCase ) def lowerCAmelCase ( self : Tuple , **__UpperCAmelCase : str ): '''simple docstring''' return {}, {}, {} def lowerCAmelCase ( self : Union[str, Any] , __UpperCAmelCase : Optional[Any] ): '''simple docstring''' _A = load_image(__UpperCAmelCase ) _A = image.size _A = self.image_processor(images=__UpperCAmelCase , return_tensors=self.framework ) return model_inputs def lowerCAmelCase ( self : List[str] , __UpperCAmelCase : int ): '''simple docstring''' _A = self.model(**__UpperCAmelCase ) return model_outputs def lowerCAmelCase ( self : Dict , __UpperCAmelCase : List[Any] ): '''simple docstring''' _A = model_outputs.predicted_depth _A = torch.nn.functional.interpolate( predicted_depth.unsqueeze(1 ) , size=self.image_size[::-1] , mode="bicubic" , align_corners=__UpperCAmelCase ) _A = prediction.squeeze().cpu().numpy() _A = (output * 255 / np.max(__UpperCAmelCase )).astype("uint8" ) _A = Image.fromarray(__UpperCAmelCase ) _A = {} _A = predicted_depth _A = depth return output_dict
79
from __future__ import annotations from collections.abc import Callable from typing import Generic, TypeVar snake_case_ = TypeVar('''T''') snake_case_ = TypeVar('''U''') class SCREAMING_SNAKE_CASE__ (Generic[T, U] ): def __init__( self , a , a): lowercase__ : List[Any] = key lowercase__ : List[Any] = val lowercase__ : DoubleLinkedListNode[T, U] | None = None lowercase__ : DoubleLinkedListNode[T, U] | None = None def __repr__( self): return ( f"""Node: key: {self.key}, val: {self.val}, """ f"""has next: {bool(self.next)}, has prev: {bool(self.prev)}""" ) class SCREAMING_SNAKE_CASE__ (Generic[T, U] ): def __init__( self): lowercase__ : DoubleLinkedListNode[T, U] = DoubleLinkedListNode(a , a) lowercase__ : DoubleLinkedListNode[T, U] = DoubleLinkedListNode(a , a) lowercase__ , lowercase__ : Union[str, Any] = self.rear, self.head def __repr__( self): lowercase__ : Any = ['DoubleLinkedList'] lowercase__ : List[str] = self.head while node.next is not None: rep.append(str(a)) lowercase__ : Tuple = node.next rep.append(str(self.rear)) return ",\n ".join(a) def snake_case_ ( self , a): lowercase__ : Optional[Any] = self.rear.prev # All nodes other than self.head are guaranteed to have non-None previous assert previous is not None lowercase__ : Dict = node lowercase__ : int = previous lowercase__ : Union[str, Any] = node lowercase__ : Optional[int] = self.rear def snake_case_ ( self , a): if node.prev is None or node.next is None: return None lowercase__ : Union[str, Any] = node.next lowercase__ : Tuple = node.prev lowercase__ : Union[str, Any] = None lowercase__ : List[Any] = None return node class SCREAMING_SNAKE_CASE__ (Generic[T, U] ): __lowerCamelCase : dict[Callable[[T], U], LRUCache[T, U]] = {} def __init__( self , a): lowercase__ : DoubleLinkedList[T, U] = DoubleLinkedList() lowercase__ : Optional[Any] = capacity lowercase__ : Union[str, Any] = 0 lowercase__ : Tuple = 0 lowercase__ : int = 0 lowercase__ : dict[T, DoubleLinkedListNode[T, U]] = {} def __repr__( self): return ( f"""CacheInfo(hits={self.hits}, misses={self.miss}, """ f"""capacity={self.capacity}, current size={self.num_keys})""" ) def __contains__( self , a): return key in self.cache def snake_case_ ( self , a): # Note: pythonic interface would throw KeyError rather than return None if key in self.cache: self.hits += 1 lowercase__ : DoubleLinkedListNode[T, U] = self.cache[key] lowercase__ : str = self.list.remove(self.cache[key]) assert node == value_node # node is guaranteed not None because it is in self.cache assert node is not None self.list.add(a) return node.val self.miss += 1 return None def snake_case_ ( self , a , a): if key not in self.cache: if self.num_keys >= self.capacity: # delete first node (oldest) when over capacity lowercase__ : Optional[int] = self.list.head.next # guaranteed to have a non-None first node when num_keys > 0 # explain to type checker via assertions assert first_node is not None assert first_node.key is not None assert ( self.list.remove(a) is not None ) # node guaranteed to be in list assert node.key is not None del self.cache[first_node.key] self.num_keys -= 1 lowercase__ : Optional[Any] = DoubleLinkedListNode(a , a) self.list.add(self.cache[key]) self.num_keys += 1 else: # bump node to the end of the list, update value lowercase__ : Any = self.list.remove(self.cache[key]) assert node is not None # node guaranteed to be in list lowercase__ : Union[str, Any] = value self.list.add(a) @classmethod def snake_case_ ( cls , a = 128): def cache_decorator_inner(a) -> Callable[..., U]: def cache_decorator_wrapper(*a) -> U: if func not in cls.decorator_function_to_instance_map: lowercase__ : Dict = LRUCache(a) lowercase__ : str = cls.decorator_function_to_instance_map[func].get(args[0]) if result is None: lowercase__ : str = func(*a) cls.decorator_function_to_instance_map[func].put(args[0] , a) return result def cache_info() -> LRUCache[T, U]: return cls.decorator_function_to_instance_map[func] setattr(a , 'cache_info' , a) # noqa: B010 return cache_decorator_wrapper return cache_decorator_inner if __name__ == "__main__": import doctest doctest.testmod()
214
0
'''simple docstring''' def a_ ( _UpperCAmelCase : int ,_UpperCAmelCase : bool = False ) -> bool: if n == 2: return True if not n % 2 or n < 2: return False if n > 5 and n % 10 not in (1, 3, 7, 9): # can quickly check last digit return False if n > 3_31_70_44_06_46_79_88_73_85_96_19_81 and not allow_probable: raise ValueError( 'Warning: upper bound of deterministic test is exceeded. ' 'Pass allow_probable=True to allow probabilistic test. ' 'A return value of True indicates a probable prime.' ) # array bounds provided by analysis __snake_case : List[Any] = [ 20_47, 1_37_36_53, 25_32_60_01, 32_15_03_17_51, 2_15_23_02_89_87_47, 3_47_47_49_66_03_83, 3_41_55_00_71_72_83_21, 1, 3_82_51_23_05_65_46_41_30_51, 1, 1, 31_86_65_85_78_34_03_11_51_16_74_61, 3_31_70_44_06_46_79_88_73_85_96_19_81, ] __snake_case : Optional[int] = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41] for idx, _p in enumerate(_UpperCAmelCase ,1 ): if n < _p: # then we have our last prime to check __snake_case : Any = primes[:idx] break __snake_case , __snake_case : Any = n - 1, 0 # break up n -1 into a power of 2 (s) and # remaining odd component # essentially, solve for d * 2 ** s == n - 1 while d % 2 == 0: d //= 2 s += 1 for prime in plist: __snake_case : Optional[int] = False for r in range(_UpperCAmelCase ): __snake_case : str = pow(_UpperCAmelCase ,d * 2**r ,_UpperCAmelCase ) # see article for analysis explanation for m if (r == 0 and m == 1) or ((m + 1) % n == 0): __snake_case : Union[str, Any] = True # this loop will not determine compositeness break if pr: continue # if pr is False, then the above loop never evaluated to true, # and the n MUST be composite return False return True def a_ ( ) -> None: assert not miller_rabin(5_61 ) assert miller_rabin(5_63 ) # 2047 assert not miller_rabin(83_82_01 ) assert miller_rabin(83_82_07 ) # 1_373_653 assert not miller_rabin(17_31_60_01 ) assert miller_rabin(17_31_60_17 ) # 25_326_001 assert not miller_rabin(30_78_38_66_41 ) assert miller_rabin(30_78_38_66_53 ) # 3_215_031_751 assert not miller_rabin(1_71_30_45_57_48_01 ) assert miller_rabin(1_71_30_45_57_48_19 ) # 2_152_302_898_747 assert not miller_rabin(2_77_97_99_72_83_07 ) assert miller_rabin(2_77_97_99_72_83_27 ) # 3_474_749_660_383 assert not miller_rabin(1_13_85_00_23_90_94_41 ) assert miller_rabin(1_13_85_00_23_90_95_27 ) # 341_550_071_728_321 assert not miller_rabin(1_27_50_41_01_88_48_80_43_51 ) assert miller_rabin(1_27_50_41_01_88_48_80_43_91 ) # 3_825_123_056_546_413_051 assert not miller_rabin(7_96_66_46_44_58_50_77_87_79_18_67 ) assert miller_rabin(7_96_66_46_44_58_50_77_87_79_19_51 ) # 318_665_857_834_031_151_167_461 assert not miller_rabin(55_28_40_67_74_46_64_78_97_66_03_33 ) assert miller_rabin(55_28_40_67_74_46_64_78_97_66_03_59 ) # 3_317_044_064_679_887_385_961_981 # upper limit for probabilistic test if __name__ == "__main__": test_miller_rabin()
0
'''simple docstring''' import os import unittest from transformers import BatchEncoding from transformers.models.bert.tokenization_bert import ( BasicTokenizer, WordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer from transformers.testing_utils import require_torch, slow from ...test_tokenization_common import TokenizerTesterMixin class snake_case__ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ): A__ = ProphetNetTokenizer A__ = False def A_ ( self : Optional[int] ) -> Dict: '''simple docstring''' super().setUp() __snake_case : Dict = [ '[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing', ',', 'low', 'lowest', ] __snake_case : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) ) def A_ ( self : int , __a : Union[str, Any] ) -> List[str]: '''simple docstring''' __snake_case : Optional[int] = 'UNwant\u00E9d,running' __snake_case : List[str] = 'unwanted, running' return input_text, output_text def A_ ( self : Union[str, Any] ) -> str: '''simple docstring''' __snake_case : Dict = self.tokenizer_class(self.vocab_file ) __snake_case : List[str] = tokenizer.tokenize('UNwant\u00E9d,running' ) self.assertListEqual(__a , ['un', '##want', '##ed', ',', 'runn', '##ing'] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , [9, 6, 7, 12, 10, 11] ) def A_ ( self : List[str] ) -> Union[str, Any]: '''simple docstring''' __snake_case : List[str] = BasicTokenizer() self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] ) def A_ ( self : Union[str, Any] ) -> str: '''simple docstring''' __snake_case : Optional[int] = BasicTokenizer(do_lower_case=__a ) self.assertListEqual( tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] ) def A_ ( self : Dict ) -> Optional[int]: '''simple docstring''' __snake_case : List[Any] = BasicTokenizer(do_lower_case=__a , strip_accents=__a ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] ) def A_ ( self : int ) -> Any: '''simple docstring''' __snake_case : int = BasicTokenizer(do_lower_case=__a , strip_accents=__a ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] ) def A_ ( self : Optional[int] ) -> Union[str, Any]: '''simple docstring''' __snake_case : Union[str, Any] = BasicTokenizer(do_lower_case=__a ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] ) def A_ ( self : List[str] ) -> Union[str, Any]: '''simple docstring''' __snake_case : Dict = BasicTokenizer(do_lower_case=__a ) self.assertListEqual( tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] ) def A_ ( self : Any ) -> List[str]: '''simple docstring''' __snake_case : str = BasicTokenizer(do_lower_case=__a , strip_accents=__a ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] ) def A_ ( self : Union[str, Any] ) -> Optional[Any]: '''simple docstring''' __snake_case : List[Any] = BasicTokenizer(do_lower_case=__a , strip_accents=__a ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] ) def A_ ( self : Optional[int] ) -> List[str]: '''simple docstring''' __snake_case : Optional[Any] = BasicTokenizer(do_lower_case=__a , never_split=['[UNK]'] ) self.assertListEqual( tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] ) def A_ ( self : Optional[int] ) -> List[Any]: '''simple docstring''' __snake_case : Any = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing'] __snake_case : List[Any] = {} for i, token in enumerate(__a ): __snake_case : List[str] = i __snake_case : Any = WordpieceTokenizer(vocab=__a , unk_token='[UNK]' ) self.assertListEqual(tokenizer.tokenize('' ) , [] ) self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] ) self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] ) @require_torch def A_ ( self : Union[str, Any] ) -> Tuple: '''simple docstring''' __snake_case : Optional[Any] = self.tokenizer_class.from_pretrained('microsoft/prophetnet-large-uncased' ) __snake_case : int = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] __snake_case : str = [1037, 2146, 20423, 2005, 7680, 7849, 3989, 1012, 102] __snake_case : Union[str, Any] = tokenizer(__a , padding=__a , return_tensors='pt' ) self.assertIsInstance(__a , __a ) __snake_case : int = list(batch.input_ids.numpy()[0] ) self.assertListEqual(__a , __a ) self.assertEqual((2, 9) , batch.input_ids.shape ) self.assertEqual((2, 9) , batch.attention_mask.shape ) def A_ ( self : Union[str, Any] ) -> Any: '''simple docstring''' self.assertTrue(_is_whitespace(' ' ) ) self.assertTrue(_is_whitespace('\t' ) ) self.assertTrue(_is_whitespace('\r' ) ) self.assertTrue(_is_whitespace('\n' ) ) self.assertTrue(_is_whitespace('\u00A0' ) ) self.assertFalse(_is_whitespace('A' ) ) self.assertFalse(_is_whitespace('-' ) ) def A_ ( self : Dict ) -> Optional[Any]: '''simple docstring''' self.assertTrue(_is_control('\u0005' ) ) self.assertFalse(_is_control('A' ) ) self.assertFalse(_is_control(' ' ) ) self.assertFalse(_is_control('\t' ) ) self.assertFalse(_is_control('\r' ) ) def A_ ( self : List[Any] ) -> int: '''simple docstring''' self.assertTrue(_is_punctuation('-' ) ) self.assertTrue(_is_punctuation('$' ) ) self.assertTrue(_is_punctuation('`' ) ) self.assertTrue(_is_punctuation('.' ) ) self.assertFalse(_is_punctuation('A' ) ) self.assertFalse(_is_punctuation(' ' ) ) @slow def A_ ( self : str ) -> Optional[int]: '''simple docstring''' __snake_case : str = self.tokenizer_class.from_pretrained('microsoft/prophetnet-large-uncased' ) __snake_case : Optional[int] = tokenizer.encode('sequence builders' , add_special_tokens=__a ) __snake_case : Optional[int] = tokenizer.encode('multi-sequence build' , add_special_tokens=__a ) __snake_case : Optional[Any] = tokenizer.build_inputs_with_special_tokens(__a ) __snake_case : List[Any] = tokenizer.build_inputs_with_special_tokens(__a , __a ) assert encoded_sentence == text + [102] assert encoded_pair == text + [102] + text_a + [102]
0
1
import gc import unittest import numpy as np import torch from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS, CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class a (_lowerCAmelCase , unittest.TestCase ): """simple docstring""" __UpperCAmelCase : List[str] = DiTPipeline __UpperCAmelCase : Tuple = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS __UpperCAmelCase : Any = PipelineTesterMixin.required_optional_params - { "latents", "num_images_per_prompt", "callback", "callback_steps", } __UpperCAmelCase : Optional[int] = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS __UpperCAmelCase : Optional[int] = False def __snake_case ( self : Optional[int] ) -> str: torch.manual_seed(0 ) __snake_case : Dict = TransformeraDModel( sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=lowerCamelCase , activation_fn="gelu-approximate" , num_embeds_ada_norm=1000 , norm_type="ada_norm_zero" , norm_elementwise_affine=lowerCamelCase , ) __snake_case : Union[str, Any] = AutoencoderKL() __snake_case : int = DDIMScheduler() __snake_case : Tuple = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler} return components def __snake_case ( self : Optional[Any] , lowerCamelCase : Any , lowerCamelCase : Tuple=0 ) -> int: if str(lowerCamelCase ).startswith("mps" ): __snake_case : str = torch.manual_seed(lowerCamelCase ) else: __snake_case : Dict = torch.Generator(device=lowerCamelCase ).manual_seed(lowerCamelCase ) __snake_case : Optional[Any] = { "class_labels": [1], "generator": generator, "num_inference_steps": 2, "output_type": "numpy", } return inputs def __snake_case ( self : str ) -> List[str]: __snake_case : Tuple = "cpu" __snake_case : int = self.get_dummy_components() __snake_case : str = self.pipeline_class(**lowerCamelCase ) pipe.to(lowerCamelCase ) pipe.set_progress_bar_config(disable=lowerCamelCase ) __snake_case : Tuple = self.get_dummy_inputs(lowerCamelCase ) __snake_case : Optional[Any] = pipe(**lowerCamelCase ).images __snake_case : List[str] = image[0, -3:, -3:, -1] self.assertEqual(image.shape , (1, 16, 16, 3) ) __snake_case : int = np.array([0.29_46, 0.66_01, 0.43_29, 0.32_96, 0.41_44, 0.53_19, 0.72_73, 0.50_13, 0.44_57] ) __snake_case : Optional[int] = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(lowerCamelCase , 1E-3 ) def __snake_case ( self : List[str] ) -> Tuple: self._test_inference_batch_single_identical(relax_max_difference=lowerCamelCase , expected_max_diff=1E-3 ) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , ) def __snake_case ( self : Tuple ) -> str: self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) @require_torch_gpu @slow class a (unittest.TestCase ): """simple docstring""" def __snake_case ( self : Optional[Any] ) -> str: super().tearDown() gc.collect() torch.cuda.empty_cache() def __snake_case ( self : List[Any] ) -> Any: __snake_case : Any = torch.manual_seed(0 ) __snake_case : List[Any] = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256" ) pipe.to("cuda" ) __snake_case : Optional[int] = ["vase", "umbrella", "white shark", "white wolf"] __snake_case : Optional[Any] = pipe.get_label_ids(lowerCamelCase ) __snake_case : List[Any] = pipe(lowerCamelCase , generator=lowerCamelCase , num_inference_steps=40 , output_type="np" ).images for word, image in zip(lowerCamelCase , lowerCamelCase ): __snake_case : int = load_numpy( F'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy' ) assert np.abs((expected_image - image).max() ) < 1E-2 def __snake_case ( self : Union[str, Any] ) -> int: __snake_case : Any = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512" ) __snake_case : int = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.to("cuda" ) __snake_case : Tuple = ["vase", "umbrella"] __snake_case : List[str] = pipe.get_label_ids(lowerCamelCase ) __snake_case : Tuple = torch.manual_seed(0 ) __snake_case : str = pipe(lowerCamelCase , generator=lowerCamelCase , num_inference_steps=25 , output_type="np" ).images for word, image in zip(lowerCamelCase , lowerCamelCase ): __snake_case : List[Any] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" F'/dit/{word}_512.npy' ) assert np.abs((expected_image - image).max() ) < 1E-1
123
import numpy as np # Importing the Keras libraries and packages import tensorflow as tf from tensorflow.keras import layers, models if __name__ == "__main__": # Initialising the CNN # (Sequential- Building the model layer by layer) _snake_case : Any = models.Sequential() # Step 1 - Convolution # Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel # (3,3) is the kernel size (filter matrix) classifier.add( layers.ConvaD(32, (3, 3), input_shape=(64, 64, 3), activation="relu") ) # Step 2 - Pooling classifier.add(layers.MaxPoolingaD(pool_size=(2, 2))) # Adding a second convolutional layer classifier.add(layers.ConvaD(32, (3, 3), activation="relu")) classifier.add(layers.MaxPoolingaD(pool_size=(2, 2))) # Step 3 - Flattening classifier.add(layers.Flatten()) # Step 4 - Full connection classifier.add(layers.Dense(units=128, activation="relu")) classifier.add(layers.Dense(units=1, activation="sigmoid")) # Compiling the CNN classifier.compile( optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"] ) # Part 2 - Fitting the CNN to the images # Load Trained model weights # from keras.models import load_model # regressor=load_model('cnn.h5') _snake_case : int = tf.keras.preprocessing.image.ImageDataGenerator( rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True ) _snake_case : Optional[Any] = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255) _snake_case : List[str] = train_datagen.flow_from_directory( "dataset/training_set", target_size=(64, 64), batch_size=32, class_mode="binary" ) _snake_case : Any = test_datagen.flow_from_directory( "dataset/test_set", target_size=(64, 64), batch_size=32, class_mode="binary" ) classifier.fit_generator( training_set, steps_per_epoch=5, epochs=30, validation_data=test_set ) classifier.save("cnn.h5") # Part 3 - Making new predictions _snake_case : Optional[Any] = tf.keras.preprocessing.image.load_img( "dataset/single_prediction/image.png", target_size=(64, 64) ) _snake_case : int = tf.keras.preprocessing.image.img_to_array(test_image) _snake_case : Tuple = np.expand_dims(test_image, axis=0) _snake_case : Any = classifier.predict(test_image) # training_set.class_indices if result[0][0] == 0: _snake_case : Any = "Normal" if result[0][0] == 1: _snake_case : List[str] = "Abnormality detected"
123
1
"""simple docstring""" from packaging import version from .import_utils import is_accelerate_available if is_accelerate_available(): import accelerate def lowerCAmelCase_ ( _snake_case : Optional[int] ) -> int: '''simple docstring''' if not is_accelerate_available(): return method __magic_name__ : int = version.parse(accelerate.__version__ ).base_version if version.parse(_snake_case ) < version.parse("0.17.0" ): return method def wrapper(self : List[str] , *_snake_case : Dict , **_snake_case : int ): if hasattr(self , "_hf_hook" ) and hasattr(self._hf_hook , "pre_forward" ): self._hf_hook.pre_forward(self ) return method(self , *_snake_case , **_snake_case ) return wrapper
353
from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class _snake_case ( snake_case ): UpperCamelCase__ = ['image_processor', 'tokenizer'] UpperCamelCase__ = 'BridgeTowerImageProcessor' UpperCamelCase__ = ('RobertaTokenizer', 'RobertaTokenizerFast') def __init__( self , _a , _a ): super().__init__(_a , _a ) def __call__( self , _a , _a = None , _a = True , _a = False , _a = None , _a = None , _a = 0 , _a = None , _a = None , _a = None , _a = False , _a = False , _a = False , _a = False , _a = True , _a = None , **_a , ): __magic_name__ : Dict = self.tokenizer( text=_a , add_special_tokens=_a , padding=_a , truncation=_a , max_length=_a , stride=_a , pad_to_multiple_of=_a , return_token_type_ids=_a , return_attention_mask=_a , return_overflowing_tokens=_a , return_special_tokens_mask=_a , return_offsets_mapping=_a , return_length=_a , verbose=_a , return_tensors=_a , **_a , ) # add pixel_values + pixel_mask __magic_name__ : List[str] = self.image_processor( _a , return_tensors=_a , do_normalize=_a , do_center_crop=_a , **_a ) encoding.update(_a ) return encoding def SCREAMING_SNAKE_CASE ( self , *_a , **_a ): return self.tokenizer.batch_decode(*_a , **_a ) def SCREAMING_SNAKE_CASE ( self , *_a , **_a ): return self.tokenizer.decode(*_a , **_a ) @property def SCREAMING_SNAKE_CASE ( self ): __magic_name__ : Dict = self.tokenizer.model_input_names __magic_name__ : Any = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
41
0
'''simple docstring''' import argparse import torch from transformers import ( SpeechTaConfig, SpeechTaFeatureExtractor, SpeechTaForSpeechToSpeech, SpeechTaForSpeechToText, SpeechTaForTextToSpeech, SpeechTaProcessor, SpeechTaTokenizer, logging, ) from transformers.tokenization_utils import AddedToken logging.set_verbosity_info() _snake_case = logging.get_logger('transformers.models.speecht5') _snake_case = { 'speech_encoder_prenet.layer_norm': 'speecht5.encoder.prenet.feature_projection.layer_norm', 'speech_encoder_prenet.post_extract_proj': 'speecht5.encoder.prenet.feature_projection.projection', 'speech_encoder_prenet.pos_conv.0': 'speecht5.encoder.prenet.pos_conv_embed.conv', 'speech_encoder_prenet.mask_emb': 'speecht5.encoder.prenet.masked_spec_embed', } _snake_case = { 'text_encoder_prenet.encoder_prenet.0': 'speecht5.encoder.prenet.embed_tokens', 'text_encoder_prenet.encoder_prenet.1.alpha': 'speecht5.encoder.prenet.encode_positions.alpha', } _snake_case = { 'speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0': 'speecht5.decoder.prenet.layers.0', 'speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0': 'speecht5.decoder.prenet.layers.1', 'speech_decoder_prenet.decoder_prenet.0.1': 'speecht5.decoder.prenet.final_layer', 'speech_decoder_prenet.decoder_prenet.1.alpha': 'speecht5.decoder.prenet.encode_positions.alpha', 'speech_decoder_prenet.spkembs_layer.0': 'speecht5.decoder.prenet.speaker_embeds_layer', } _snake_case = { 'speech_decoder_postnet.feat_out': 'speech_decoder_postnet.feat_out', 'speech_decoder_postnet.prob_out': 'speech_decoder_postnet.prob_out', 'speech_decoder_postnet.postnet.postnet.0.0': 'speech_decoder_postnet.layers.0.conv', 'speech_decoder_postnet.postnet.postnet.0.1': 'speech_decoder_postnet.layers.0.batch_norm', 'speech_decoder_postnet.postnet.postnet.1.0': 'speech_decoder_postnet.layers.1.conv', 'speech_decoder_postnet.postnet.postnet.1.1': 'speech_decoder_postnet.layers.1.batch_norm', 'speech_decoder_postnet.postnet.postnet.2.0': 'speech_decoder_postnet.layers.2.conv', 'speech_decoder_postnet.postnet.postnet.2.1': 'speech_decoder_postnet.layers.2.batch_norm', 'speech_decoder_postnet.postnet.postnet.3.0': 'speech_decoder_postnet.layers.3.conv', 'speech_decoder_postnet.postnet.postnet.3.1': 'speech_decoder_postnet.layers.3.batch_norm', 'speech_decoder_postnet.postnet.postnet.4.0': 'speech_decoder_postnet.layers.4.conv', 'speech_decoder_postnet.postnet.postnet.4.1': 'speech_decoder_postnet.layers.4.batch_norm', } _snake_case = { 'text_decoder_prenet.embed_tokens': 'speecht5.decoder.prenet.embed_tokens', } _snake_case = { 'text_decoder_postnet.output_projection': 'text_decoder_postnet.lm_head', } _snake_case = { 'encoder.layers.*.self_attn.k_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj', 'encoder.layers.*.self_attn.v_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj', 'encoder.layers.*.self_attn.q_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj', 'encoder.layers.*.self_attn.out_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj', 'encoder.layers.*.self_attn_layer_norm': 'speecht5.encoder.wrapped_encoder.layers.*.layer_norm', 'encoder.layers.*.fc1': 'speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense', 'encoder.layers.*.fc2': 'speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense', 'encoder.layers.*.final_layer_norm': 'speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm', 'encoder.layer_norm': 'speecht5.encoder.wrapped_encoder.layer_norm', 'encoder.pos_emb.pe_k': 'speecht5.encoder.wrapped_encoder.embed_positions.pe_k', } _snake_case = { 'decoder.layers.*.self_attn.k_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj', 'decoder.layers.*.self_attn.v_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj', 'decoder.layers.*.self_attn.q_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj', 'decoder.layers.*.self_attn.out_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj', 'decoder.layers.*.self_attn_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm', 'decoder.layers.*.encoder_attn.k_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj', 'decoder.layers.*.encoder_attn.v_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj', 'decoder.layers.*.encoder_attn.q_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj', 'decoder.layers.*.encoder_attn.out_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj', 'decoder.layers.*.encoder_attn_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm', 'decoder.layers.*.fc1': 'speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense', 'decoder.layers.*.fc2': 'speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense', 'decoder.layers.*.final_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm', } _snake_case = { **MAPPING_SPEECH_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_TEXT_DECODER_PRENET, **MAPPING_TEXT_DECODER_POSTNET, } _snake_case = { **MAPPING_TEXT_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_SPEECH_DECODER_PRENET, **MAPPING_SPEECH_DECODER_POSTNET, } _snake_case = { **MAPPING_SPEECH_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_SPEECH_DECODER_PRENET, **MAPPING_SPEECH_DECODER_POSTNET, } _snake_case = [] _snake_case = [ 'encoder.version', 'encoder.layers.*.norm_k.weight', 'encoder.layers.*.norm_k.bias', 'decoder.version', 'decoder.layers.*.norm_k.weight', 'decoder.layers.*.norm_k.bias', 'decoder.pos_emb.pe_k', 'speech_encoder_prenet.embed_positions._float_tensor', 'text_decoder_prenet.embed_positions._float_tensor', ] _snake_case = IGNORE_KEYS + [ 'encoder.proj', 'text_encoder_prenet.*', 'speech_decoder_prenet.*', 'speech_decoder_postnet.*', ] _snake_case = IGNORE_KEYS + [ 'encoder.proj', 'speech_encoder_prenet.*', 'text_decoder_prenet.*', 'text_decoder_postnet.*', ] _snake_case = IGNORE_KEYS + [ 'encoder.proj', 'text_encoder_prenet.*', 'text_decoder_prenet.*', 'text_decoder_postnet.*', ] def _A ( snake_case , snake_case , snake_case , snake_case , snake_case ) -> Optional[Any]: for attribute in key.split("." ): _lowercase : Dict = getattr(snake_case , snake_case ) if weight_type is not None: _lowercase : Union[str, Any] = getattr(snake_case , snake_case ).shape else: _lowercase : int = hf_pointer.shape if hf_shape != value.shape: raise ValueError( F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be''' F''' {value.shape} for {full_name}''' ) if weight_type == "weight": _lowercase : str = value elif weight_type == "weight_g": _lowercase : List[str] = value elif weight_type == "weight_v": _lowercase : int = value elif weight_type == "bias": _lowercase : Union[str, Any] = value elif weight_type == "running_mean": _lowercase : Union[str, Any] = value elif weight_type == "running_var": _lowercase : Optional[Any] = value elif weight_type == "num_batches_tracked": _lowercase : Tuple = value else: _lowercase : int = value logger.info(F'''{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.''' ) def _A ( snake_case , snake_case ) -> str: for key in ignore_keys: if key.endswith(".*" ): if name.startswith(key[:-1] ): return True elif ".*." in key: _lowercase , _lowercase : Optional[Any] = key.split(".*." ) if prefix in name and suffix in name: return True elif key in name: return True return False def _A ( snake_case , snake_case , snake_case ) -> Tuple: _lowercase : List[Any] = [] if task == "s2t": _lowercase : str = hf_model.speechta.encoder.prenet.feature_encoder _lowercase : List[str] = MAPPING_S2T _lowercase : Optional[Any] = IGNORE_KEYS_S2T elif task == "t2s": _lowercase : List[Any] = None _lowercase : Tuple = MAPPING_T2S _lowercase : List[str] = IGNORE_KEYS_T2S elif task == "s2s": _lowercase : Union[str, Any] = hf_model.speechta.encoder.prenet.feature_encoder _lowercase : Optional[Any] = MAPPING_S2S _lowercase : Tuple = IGNORE_KEYS_S2S else: raise ValueError(F'''Unsupported task: {task}''' ) for name, value in fairseq_dict.items(): if should_ignore(snake_case , snake_case ): logger.info(F'''{name} was ignored''' ) continue _lowercase : Optional[Any] = False if "conv_layers" in name: load_conv_layer( snake_case , snake_case , snake_case , snake_case , hf_model.config.feat_extract_norm == "group" , ) _lowercase : Optional[int] = True else: for key, mapped_key in MAPPING.items(): # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if "*" in key: _lowercase , _lowercase : Optional[int] = key.split(".*." ) if prefix in name and suffix in name: _lowercase : Dict = suffix # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]: if key in name: _lowercase : Optional[Any] = True if "*" in mapped_key: _lowercase : int = name.split(snake_case )[0].split("." )[-2] _lowercase : Union[str, Any] = mapped_key.replace("*" , snake_case ) if "weight_g" in name: _lowercase : Dict = "weight_g" elif "weight_v" in name: _lowercase : Optional[Any] = "weight_v" elif "bias" in name: _lowercase : Any = "bias" elif "weight" in name: _lowercase : Dict = "weight" elif "running_mean" in name: _lowercase : List[Any] = "running_mean" elif "running_var" in name: _lowercase : Union[str, Any] = "running_var" elif "num_batches_tracked" in name: _lowercase : str = "num_batches_tracked" else: _lowercase : str = None set_recursively(snake_case , snake_case , snake_case , snake_case , snake_case ) continue if not is_used: unused_weights.append(snake_case ) logger.warning(F'''Unused weights: {unused_weights}''' ) def _A ( snake_case , snake_case , snake_case , snake_case , snake_case ) -> List[str]: _lowercase : Optional[int] = full_name.split("conv_layers." )[-1] _lowercase : Tuple = name.split("." ) _lowercase : Optional[int] = int(items[0] ) _lowercase : Any = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) _lowercase : Union[str, Any] = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) _lowercase : int = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' ) _lowercase : Optional[int] = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' ) _lowercase : Tuple = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(snake_case ) @torch.no_grad() def _A ( snake_case , snake_case , snake_case , snake_case=None , snake_case=None , snake_case=None , ) -> Optional[Any]: if config_path is not None: _lowercase : List[str] = SpeechTaConfig.from_pretrained(snake_case ) else: _lowercase : int = SpeechTaConfig() if task == "s2t": _lowercase : List[Any] = config.max_text_positions _lowercase : Optional[int] = SpeechTaForSpeechToText(snake_case ) elif task == "t2s": _lowercase : str = 18_76 _lowercase : str = 6_00 _lowercase : List[Any] = config.max_speech_positions _lowercase : Optional[int] = SpeechTaForTextToSpeech(snake_case ) elif task == "s2s": _lowercase : Tuple = 18_76 _lowercase : List[Any] = config.max_speech_positions _lowercase : str = SpeechTaForSpeechToSpeech(snake_case ) else: raise ValueError(F'''Unknown task name: {task}''' ) if vocab_path: _lowercase : Any = SpeechTaTokenizer(snake_case , model_max_length=config.max_text_positions ) # Mask token behaves like a normal word, i.e. include the space before it _lowercase : Optional[int] = AddedToken("<mask>" , lstrip=snake_case , rstrip=snake_case ) _lowercase : int = mask_token tokenizer.add_special_tokens({"mask_token": mask_token} ) tokenizer.add_tokens(["<ctc_blank>"] ) _lowercase : Dict = SpeechTaFeatureExtractor() _lowercase : str = SpeechTaProcessor(tokenizer=snake_case , feature_extractor=snake_case ) processor.save_pretrained(snake_case ) _lowercase : Union[str, Any] = torch.load(snake_case ) recursively_load_weights(fairseq_checkpoint["model"] , snake_case , snake_case ) model.save_pretrained(snake_case ) if repo_id: print("Pushing to the hub..." ) processor.push_to_hub(snake_case ) model.push_to_hub(snake_case ) if __name__ == "__main__": _snake_case = argparse.ArgumentParser() parser.add_argument( '--task', default='s2t', type=str, help='Type of the SpeechT5 model you\'d like to convert. Should be one of \'s2t\', \'t2s\', \'s2s\'.', ) parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to fairseq checkpoint') parser.add_argument('--vocab_path', default=None, type=str, help='Path to SentencePiece model') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') parser.add_argument( '--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.' ) parser.add_argument( '--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.' ) _snake_case = parser.parse_args() convert_speechta_checkpoint( args.task, args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.vocab_path, args.push_to_hub, )
250
'''simple docstring''' from __future__ import annotations import requests def _A ( snake_case ) -> dict: _lowercase : Dict = F'''https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty''' return requests.get(snake_case ).json() def _A ( snake_case = 10 ) -> list[dict]: _lowercase : List[Any] = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty" _lowercase : List[str] = requests.get(snake_case ).json()[:max_stories] return [get_hackernews_story(snake_case ) for story_id in story_ids] def _A ( snake_case = 10 ) -> str: _lowercase : Union[str, Any] = hackernews_top_stories(snake_case ) return "\n".join("* [{title}]({url})".format(**snake_case ) for story in stories ) if __name__ == "__main__": print(hackernews_top_stories_as_markdown())
250
1
"""simple docstring""" import jax.numpy as jnp from ...utils import logging from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel from .configuration_mta import MTaConfig a : List[str] = logging.get_logger(__name__) a : Optional[int] = '''T5Config''' def _SCREAMING_SNAKE_CASE ( _lowercase : jnp.array , _lowercase : int , _lowercase : int ) ->jnp.ndarray: '''simple docstring''' a : Tuple = jnp.zeros_like(_lowercase ) a : Tuple = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1] ) a : Dict = shifted_input_ids.at[:, 0].set(_lowercase ) a : Optional[Any] = jnp.where(shifted_input_ids == -100 , _lowercase , _lowercase ) return shifted_input_ids class __UpperCamelCase ( a__ ): lowerCamelCase : Any ="""mt5""" lowerCamelCase : Dict =MTaConfig class __UpperCamelCase ( a__ ): lowerCamelCase : str ="""mt5""" lowerCamelCase : Tuple =MTaConfig class __UpperCamelCase ( a__ ): lowerCamelCase : List[str] ="""mt5""" lowerCamelCase : Tuple =MTaConfig
79
"""simple docstring""" from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import Features, Value from .base import TaskTemplate @dataclass(frozen=a__ ) class __UpperCamelCase ( a__ ): # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization lowerCamelCase : str =field(default="""summarization""" , metadata={"""include_in_asdict_even_if_is_default""": True} ) lowerCamelCase : ClassVar[Features] =Features({"""text""": Value("""string""" )} ) lowerCamelCase : ClassVar[Features] =Features({"""summary""": Value("""string""" )} ) lowerCamelCase : str ="text" lowerCamelCase : str ="summary" @property def __a ( self ) -> Dict[str, str]: return {self.text_column: "text", self.summary_column: "summary"}
79
1
from __future__ import annotations import math import random from typing import Any class _lowercase : '''simple docstring''' def __init__( self ): '''simple docstring''' UpperCamelCase_ = [] UpperCamelCase_ = 0 UpperCamelCase_ = 0 def _lowerCamelCase ( self ): '''simple docstring''' return self.head == self.tail def _lowerCamelCase ( self , snake_case__ ): '''simple docstring''' self.data.append(snake_case__ ) UpperCamelCase_ = self.tail + 1 def _lowerCamelCase ( self ): '''simple docstring''' UpperCamelCase_ = self.data[self.head] UpperCamelCase_ = self.head + 1 return ret def _lowerCamelCase ( self ): '''simple docstring''' return self.tail - self.head def _lowerCamelCase ( self ): '''simple docstring''' print(self.data ) print("**************" ) print(self.data[self.head : self.tail] ) class _lowercase : '''simple docstring''' def __init__( self , snake_case__ ): '''simple docstring''' UpperCamelCase_ = data UpperCamelCase_ = None UpperCamelCase_ = None UpperCamelCase_ = 1 def _lowerCamelCase ( self ): '''simple docstring''' return self.data def _lowerCamelCase ( self ): '''simple docstring''' return self.left def _lowerCamelCase ( self ): '''simple docstring''' return self.right def _lowerCamelCase ( self ): '''simple docstring''' return self.height def _lowerCamelCase ( self , snake_case__ ): '''simple docstring''' UpperCamelCase_ = data def _lowerCamelCase ( self , snake_case__ ): '''simple docstring''' UpperCamelCase_ = node def _lowerCamelCase ( self , snake_case__ ): '''simple docstring''' UpperCamelCase_ = node def _lowerCamelCase ( self , snake_case__ ): '''simple docstring''' UpperCamelCase_ = height def _lowerCAmelCase (_lowerCAmelCase): if node is None: return 0 return node.get_height() def _lowerCAmelCase (_lowerCAmelCase , _lowerCAmelCase): if a > b: return a return b def _lowerCAmelCase (_lowerCAmelCase): print("left rotation node:" , node.get_data()) UpperCamelCase_ = node.get_left() assert ret is not None node.set_left(ret.get_right()) ret.set_right(_lowerCAmelCase) UpperCamelCase_ = my_max(get_height(node.get_right()) , get_height(node.get_left())) + 1 node.set_height(_lowerCAmelCase) UpperCamelCase_ = my_max(get_height(ret.get_right()) , get_height(ret.get_left())) + 1 ret.set_height(_lowerCAmelCase) return ret def _lowerCAmelCase (_lowerCAmelCase): print("right rotation node:" , node.get_data()) UpperCamelCase_ = node.get_right() assert ret is not None node.set_right(ret.get_left()) ret.set_left(_lowerCAmelCase) UpperCamelCase_ = my_max(get_height(node.get_right()) , get_height(node.get_left())) + 1 node.set_height(_lowerCAmelCase) UpperCamelCase_ = my_max(get_height(ret.get_right()) , get_height(ret.get_left())) + 1 ret.set_height(_lowerCAmelCase) return ret def _lowerCAmelCase (_lowerCAmelCase): UpperCamelCase_ = node.get_left() assert left_child is not None node.set_left(left_rotation(_lowerCAmelCase)) return right_rotation(_lowerCAmelCase) def _lowerCAmelCase (_lowerCAmelCase): UpperCamelCase_ = node.get_right() assert right_child is not None node.set_right(right_rotation(_lowerCAmelCase)) return left_rotation(_lowerCAmelCase) def _lowerCAmelCase (_lowerCAmelCase , _lowerCAmelCase): if node is None: return MyNode(_lowerCAmelCase) if data < node.get_data(): node.set_left(insert_node(node.get_left() , _lowerCAmelCase)) if ( get_height(node.get_left()) - get_height(node.get_right()) == 2 ): # an unbalance detected UpperCamelCase_ = node.get_left() assert left_child is not None if ( data < left_child.get_data() ): # new node is the left child of the left child UpperCamelCase_ = right_rotation(_lowerCAmelCase) else: UpperCamelCase_ = lr_rotation(_lowerCAmelCase) else: node.set_right(insert_node(node.get_right() , _lowerCAmelCase)) if get_height(node.get_right()) - get_height(node.get_left()) == 2: UpperCamelCase_ = node.get_right() assert right_child is not None if data < right_child.get_data(): UpperCamelCase_ = rl_rotation(_lowerCAmelCase) else: UpperCamelCase_ = left_rotation(_lowerCAmelCase) UpperCamelCase_ = my_max(get_height(node.get_right()) , get_height(node.get_left())) + 1 node.set_height(_lowerCAmelCase) return node def _lowerCAmelCase (_lowerCAmelCase): while True: UpperCamelCase_ = root.get_right() if right_child is None: break UpperCamelCase_ = right_child return root.get_data() def _lowerCAmelCase (_lowerCAmelCase): while True: UpperCamelCase_ = root.get_left() if left_child is None: break UpperCamelCase_ = left_child return root.get_data() def _lowerCAmelCase (_lowerCAmelCase , _lowerCAmelCase): UpperCamelCase_ = root.get_left() UpperCamelCase_ = root.get_right() if root.get_data() == data: if left_child is not None and right_child is not None: UpperCamelCase_ = get_left_most(_lowerCAmelCase) root.set_data(_lowerCAmelCase) root.set_right(del_node(_lowerCAmelCase , _lowerCAmelCase)) elif left_child is not None: UpperCamelCase_ = left_child elif right_child is not None: UpperCamelCase_ = right_child else: return None elif root.get_data() > data: if left_child is None: print("No such data") return root else: root.set_left(del_node(_lowerCAmelCase , _lowerCAmelCase)) else: # root.get_data() < data if right_child is None: return root else: root.set_right(del_node(_lowerCAmelCase , _lowerCAmelCase)) if get_height(_lowerCAmelCase) - get_height(_lowerCAmelCase) == 2: assert right_child is not None if get_height(right_child.get_right()) > get_height(right_child.get_left()): UpperCamelCase_ = left_rotation(_lowerCAmelCase) else: UpperCamelCase_ = rl_rotation(_lowerCAmelCase) elif get_height(_lowerCAmelCase) - get_height(_lowerCAmelCase) == -2: assert left_child is not None if get_height(left_child.get_left()) > get_height(left_child.get_right()): UpperCamelCase_ = right_rotation(_lowerCAmelCase) else: UpperCamelCase_ = lr_rotation(_lowerCAmelCase) UpperCamelCase_ = my_max(get_height(root.get_right()) , get_height(root.get_left())) + 1 root.set_height(_lowerCAmelCase) return root class _lowercase : '''simple docstring''' def __init__( self ): '''simple docstring''' UpperCamelCase_ = None def _lowerCamelCase ( self ): '''simple docstring''' return get_height(self.root ) def _lowerCamelCase ( self , snake_case__ ): '''simple docstring''' print("insert:" + str(snake_case__ ) ) UpperCamelCase_ = insert_node(self.root , snake_case__ ) def _lowerCamelCase ( self , snake_case__ ): '''simple docstring''' print("delete:" + str(snake_case__ ) ) if self.root is None: print("Tree is empty!" ) return UpperCamelCase_ = del_node(self.root , snake_case__ ) def __str__( self , ): # a level traversale, gives a more intuitive look on the tree '''simple docstring''' UpperCamelCase_ = "" UpperCamelCase_ = MyQueue() q.push(self.root ) UpperCamelCase_ = self.get_height() if layer == 0: return output UpperCamelCase_ = 0 while not q.is_empty(): UpperCamelCase_ = q.pop() UpperCamelCase_ = " " * int(math.pow(2 , layer - 1 ) ) output += space if node is None: output += "*" q.push(snake_case__ ) q.push(snake_case__ ) else: output += str(node.get_data() ) q.push(node.get_left() ) q.push(node.get_right() ) output += space UpperCamelCase_ = cnt + 1 for i in range(100 ): if cnt == math.pow(2 , snake_case__ ) - 1: UpperCamelCase_ = layer - 1 if layer == 0: output += "\n*************************************" return output output += "\n" break output += "\n*************************************" return output def _lowerCAmelCase (): import doctest doctest.testmod() if __name__ == "__main__": _test() UpperCAmelCase : Union[str, Any] =AVLtree() UpperCAmelCase : List[Any] =list(range(10)) random.shuffle(lst) for i in lst: t.insert(i) print(str(t)) random.shuffle(lst) for i in lst: t.del_node(i) print(str(t))
128
def _lowerCAmelCase (_lowerCAmelCase): UpperCamelCase_ = len(_lowerCAmelCase) UpperCamelCase_ = len(matrix[0]) UpperCamelCase_ = min(_lowerCAmelCase , _lowerCAmelCase) for row in range(_lowerCAmelCase): # Check if diagonal element is not zero if matrix[row][row] != 0: # Eliminate all the elements below the diagonal for col in range(row + 1 , _lowerCAmelCase): UpperCamelCase_ = matrix[col][row] / matrix[row][row] for i in range(_lowerCAmelCase , _lowerCAmelCase): matrix[col][i] -= multiplier * matrix[row][i] else: # Find a non-zero diagonal element to swap rows UpperCamelCase_ = True for i in range(row + 1 , _lowerCAmelCase): if matrix[i][row] != 0: UpperCamelCase_ , UpperCamelCase_ = matrix[i], matrix[row] UpperCamelCase_ = False break if reduce: rank -= 1 for i in range(_lowerCAmelCase): UpperCamelCase_ = matrix[i][rank] # Reduce the row pointer by one to stay on the same row row -= 1 return rank if __name__ == "__main__": import doctest doctest.testmod()
128
1
"""simple docstring""" import argparse import os import re __UpperCamelCase : List[Any] = '''src/transformers''' # Pattern that looks at the indentation in a line. __UpperCamelCase : List[str] = re.compile(R'^(\s*)\S') # Pattern that matches `"key":" and puts `key` in group 0. __UpperCamelCase : List[str] = re.compile(R'^\s*"([^"]+)":') # Pattern that matches `_import_structure["key"]` and puts `key` in group 0. __UpperCamelCase : List[Any] = re.compile(R'^\s*_import_structure\["([^"]+)"\]') # Pattern that matches `"key",` and puts `key` in group 0. __UpperCamelCase : List[Any] = re.compile(R'^\s*"([^"]+)",\s*$') # Pattern that matches any `[stuff]` and puts `stuff` in group 0. __UpperCamelCase : List[str] = re.compile(R'\[([^\]]+)\]') def A ( _lowercase ): SCREAMING_SNAKE_CASE : List[str] = _re_indent.search(__lowerCAmelCase ) return "" if search is None else search.groups()[0] def A ( _lowercase , _lowercase="" , _lowercase=None , _lowercase=None ): SCREAMING_SNAKE_CASE : Tuple = 0 SCREAMING_SNAKE_CASE : Tuple = code.split('''\n''' ) if start_prompt is not None: while not lines[index].startswith(__lowerCAmelCase ): index += 1 SCREAMING_SNAKE_CASE : Any = ['''\n'''.join(lines[:index] )] else: SCREAMING_SNAKE_CASE : List[str] = [] # We split into blocks until we get to the `end_prompt` (or the end of the block). SCREAMING_SNAKE_CASE : Union[str, Any] = [lines[index]] index += 1 while index < len(__lowerCAmelCase ) and (end_prompt is None or not lines[index].startswith(__lowerCAmelCase )): if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level: if len(__lowerCAmelCase ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ''' ''' ): current_block.append(lines[index] ) blocks.append('''\n'''.join(__lowerCAmelCase ) ) if index < len(__lowerCAmelCase ) - 1: SCREAMING_SNAKE_CASE : Dict = [lines[index + 1]] index += 1 else: SCREAMING_SNAKE_CASE : Union[str, Any] = [] else: blocks.append('''\n'''.join(__lowerCAmelCase ) ) SCREAMING_SNAKE_CASE : Any = [lines[index]] else: current_block.append(lines[index] ) index += 1 # Adds current block if it's nonempty. if len(__lowerCAmelCase ) > 0: blocks.append('''\n'''.join(__lowerCAmelCase ) ) # Add final block after end_prompt if provided. if end_prompt is not None and index < len(__lowerCAmelCase ): blocks.append('''\n'''.join(lines[index:] ) ) return blocks def A ( _lowercase ): def _inner(_lowercase ): return key(__lowerCAmelCase ).lower().replace('''_''' , '''''' ) return _inner def A ( _lowercase , _lowercase=None ): # If no key is provided, we use a noop. def noop(_lowercase ): return x if key is None: SCREAMING_SNAKE_CASE : Tuple = noop # Constants are all uppercase, they go first. SCREAMING_SNAKE_CASE : Optional[Any] = [obj for obj in objects if key(__lowerCAmelCase ).isupper()] # Classes are not all uppercase but start with a capital, they go second. SCREAMING_SNAKE_CASE : List[str] = [obj for obj in objects if key(__lowerCAmelCase )[0].isupper() and not key(__lowerCAmelCase ).isupper()] # Functions begin with a lowercase, they go last. SCREAMING_SNAKE_CASE : Tuple = [obj for obj in objects if not key(__lowerCAmelCase )[0].isupper()] SCREAMING_SNAKE_CASE : Dict = ignore_underscore(__lowerCAmelCase ) return sorted(__lowerCAmelCase , key=__lowerCAmelCase ) + sorted(__lowerCAmelCase , key=__lowerCAmelCase ) + sorted(__lowerCAmelCase , key=__lowerCAmelCase ) def A ( _lowercase ): # This inner function sort imports between [ ]. def _replace(_lowercase ): SCREAMING_SNAKE_CASE : List[str] = match.groups()[0] if "," not in imports: return f"""[{imports}]""" SCREAMING_SNAKE_CASE : Any = [part.strip().replace('''"''' , '''''' ) for part in imports.split(''',''' )] # We will have a final empty element if the line finished with a comma. if len(keys[-1] ) == 0: SCREAMING_SNAKE_CASE : List[Any] = keys[:-1] return "[" + ", ".join([f"""\"{k}\"""" for k in sort_objects(__lowerCAmelCase )] ) + "]" SCREAMING_SNAKE_CASE : Tuple = import_statement.split('''\n''' ) if len(__lowerCAmelCase ) > 3: # Here we have to sort internal imports that are on several lines (one per name): # key: [ # "object1", # "object2", # ... # ] # We may have to ignore one or two lines on each side. SCREAMING_SNAKE_CASE : List[Any] = 2 if lines[1].strip() == '''[''' else 1 SCREAMING_SNAKE_CASE : Union[str, Any] = [(i, _re_strip_line.search(__lowerCAmelCase ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )] SCREAMING_SNAKE_CASE : Optional[Any] = sort_objects(__lowerCAmelCase , key=lambda _lowercase : x[1] ) SCREAMING_SNAKE_CASE : Any = [lines[x[0] + idx] for x in sorted_indices] return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] ) elif len(__lowerCAmelCase ) == 3: # Here we have to sort internal imports that are on one separate line: # key: [ # "object1", "object2", ... # ] if _re_bracket_content.search(lines[1] ) is not None: SCREAMING_SNAKE_CASE : List[str] = _re_bracket_content.sub(_replace , lines[1] ) else: SCREAMING_SNAKE_CASE : str = [part.strip().replace('''"''' , '''''' ) for part in lines[1].split(''',''' )] # We will have a final empty element if the line finished with a comma. if len(keys[-1] ) == 0: SCREAMING_SNAKE_CASE : Optional[Any] = keys[:-1] SCREAMING_SNAKE_CASE : int = get_indent(lines[1] ) + ''', '''.join([f"""\"{k}\"""" for k in sort_objects(__lowerCAmelCase )] ) return "\n".join(__lowerCAmelCase ) else: # Finally we have to deal with imports fitting on one line SCREAMING_SNAKE_CASE : Union[str, Any] = _re_bracket_content.sub(_replace , __lowerCAmelCase ) return import_statement def A ( _lowercase , _lowercase=True ): with open(__lowerCAmelCase , encoding='''utf-8''' ) as f: SCREAMING_SNAKE_CASE : List[str] = f.read() if "_import_structure" not in code: return # Blocks of indent level 0 SCREAMING_SNAKE_CASE : List[Any] = split_code_in_indented_blocks( __lowerCAmelCase , start_prompt='''_import_structure = {''' , end_prompt='''if TYPE_CHECKING:''' ) # We ignore block 0 (everything untils start_prompt) and the last block (everything after end_prompt). for block_idx in range(1 , len(__lowerCAmelCase ) - 1 ): # Check if the block contains some `_import_structure`s thingy to sort. SCREAMING_SNAKE_CASE : Any = main_blocks[block_idx] SCREAMING_SNAKE_CASE : int = block.split('''\n''' ) # Get to the start of the imports. SCREAMING_SNAKE_CASE : Tuple = 0 while line_idx < len(__lowerCAmelCase ) and "_import_structure" not in block_lines[line_idx]: # Skip dummy import blocks if "import dummy" in block_lines[line_idx]: SCREAMING_SNAKE_CASE : int = len(__lowerCAmelCase ) else: line_idx += 1 if line_idx >= len(__lowerCAmelCase ): continue # Ignore beginning and last line: they don't contain anything. SCREAMING_SNAKE_CASE : Tuple = '''\n'''.join(block_lines[line_idx:-1] ) SCREAMING_SNAKE_CASE : List[str] = get_indent(block_lines[1] ) # Slit the internal block into blocks of indent level 1. SCREAMING_SNAKE_CASE : Optional[Any] = split_code_in_indented_blocks(__lowerCAmelCase , indent_level=__lowerCAmelCase ) # We have two categories of import key: list or _import_structure[key].append/extend SCREAMING_SNAKE_CASE : List[Any] = _re_direct_key if '''_import_structure = {''' in block_lines[0] else _re_indirect_key # Grab the keys, but there is a trap: some lines are empty or just comments. SCREAMING_SNAKE_CASE : Optional[int] = [(pattern.search(__lowerCAmelCase ).groups()[0] if pattern.search(__lowerCAmelCase ) is not None else None) for b in internal_blocks] # We only sort the lines with a key. SCREAMING_SNAKE_CASE : Dict = [(i, key) for i, key in enumerate(__lowerCAmelCase ) if key is not None] SCREAMING_SNAKE_CASE : Any = [x[0] for x in sorted(__lowerCAmelCase , key=lambda _lowercase : x[1] )] # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest. SCREAMING_SNAKE_CASE : Union[str, Any] = 0 SCREAMING_SNAKE_CASE : Any = [] for i in range(len(__lowerCAmelCase ) ): if keys[i] is None: reorderded_blocks.append(internal_blocks[i] ) else: SCREAMING_SNAKE_CASE : str = sort_objects_in_import(internal_blocks[sorted_indices[count]] ) reorderded_blocks.append(__lowerCAmelCase ) count += 1 # And we put our main block back together with its first and last line. SCREAMING_SNAKE_CASE : Optional[int] = '''\n'''.join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]] ) if code != "\n".join(__lowerCAmelCase ): if check_only: return True else: print(f"""Overwriting {file}.""" ) with open(__lowerCAmelCase , '''w''' , encoding='''utf-8''' ) as f: f.write('''\n'''.join(__lowerCAmelCase ) ) def A ( _lowercase=True ): SCREAMING_SNAKE_CASE : Tuple = [] for root, _, files in os.walk(__lowerCAmelCase ): if "__init__.py" in files: SCREAMING_SNAKE_CASE : Union[str, Any] = sort_imports(os.path.join(__lowerCAmelCase , '''__init__.py''' ) , check_only=__lowerCAmelCase ) if result: SCREAMING_SNAKE_CASE : Optional[Any] = [os.path.join(__lowerCAmelCase , '''__init__.py''' )] if len(__lowerCAmelCase ) > 0: raise ValueError(f"""Would overwrite {len(__lowerCAmelCase )} files, run `make style`.""" ) if __name__ == "__main__": __UpperCamelCase : List[Any] = argparse.ArgumentParser() parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.') __UpperCamelCase : List[Any] = parser.parse_args() sort_imports_in_all_inits(check_only=args.check_only)
371
import argparse import torch from transformers import BertForMaskedLM if __name__ == "__main__": __UpperCamelCase : Dict = argparse.ArgumentParser( description=( 'Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned' ' Distillation' ) ) parser.add_argument('--model_type', default='bert', choices=['bert']) parser.add_argument('--model_name', default='bert-base-uncased', type=str) parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_bert-base-uncased_0247911.pth', type=str) parser.add_argument('--vocab_transform', action='store_true') __UpperCamelCase : Optional[int] = parser.parse_args() if args.model_type == "bert": __UpperCamelCase : Optional[int] = BertForMaskedLM.from_pretrained(args.model_name) __UpperCamelCase : Optional[int] = 'bert' else: raise ValueError('args.model_type should be "bert".') __UpperCamelCase : List[Any] = model.state_dict() __UpperCamelCase : Union[str, Any] = {} for w in ["word_embeddings", "position_embeddings"]: __UpperCamelCase : List[Any] = state_dict[f"""{prefix}.embeddings.{w}.weight"""] for w in ["weight", "bias"]: __UpperCamelCase : Optional[int] = state_dict[f"""{prefix}.embeddings.LayerNorm.{w}"""] __UpperCamelCase : Any = 0 for teacher_idx in [0, 2, 4, 7, 9, 11]: for w in ["weight", "bias"]: __UpperCamelCase : Optional[Any] = state_dict[ f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}""" ] __UpperCamelCase : Union[str, Any] = state_dict[ f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}""" ] __UpperCamelCase : Union[str, Any] = state_dict[ f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}""" ] __UpperCamelCase : List[str] = state_dict[ f"""{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}""" ] __UpperCamelCase : Optional[Any] = state_dict[ f"""{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}""" ] __UpperCamelCase : Optional[int] = state_dict[ f"""{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}""" ] __UpperCamelCase : Dict = state_dict[ f"""{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}""" ] __UpperCamelCase : List[Any] = state_dict[ f"""{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}""" ] std_idx += 1 __UpperCamelCase : List[str] = state_dict['cls.predictions.decoder.weight'] __UpperCamelCase : int = state_dict['cls.predictions.bias'] if args.vocab_transform: for w in ["weight", "bias"]: __UpperCamelCase : List[str] = state_dict[f"""cls.predictions.transform.dense.{w}"""] __UpperCamelCase : List[Any] = state_dict[f"""cls.predictions.transform.LayerNorm.{w}"""] print(f"""N layers selected for distillation: {std_idx}""") print(f"""Number of params transferred for distillation: {len(compressed_sd.keys())}""") print(f"""Save transferred checkpoint to {args.dump_checkpoint}.""") torch.save(compressed_sd, args.dump_checkpoint)
258
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCamelCase_ = { '''configuration_funnel''': ['''FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FunnelConfig'''], '''convert_funnel_original_tf_checkpoint_to_pytorch''': [], '''tokenization_funnel''': ['''FunnelTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase_ = ['''FunnelTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase_ = [ '''FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST''', '''FunnelBaseModel''', '''FunnelForMaskedLM''', '''FunnelForMultipleChoice''', '''FunnelForPreTraining''', '''FunnelForQuestionAnswering''', '''FunnelForSequenceClassification''', '''FunnelForTokenClassification''', '''FunnelModel''', '''FunnelPreTrainedModel''', '''load_tf_weights_in_funnel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase_ = [ '''TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFFunnelBaseModel''', '''TFFunnelForMaskedLM''', '''TFFunnelForMultipleChoice''', '''TFFunnelForPreTraining''', '''TFFunnelForQuestionAnswering''', '''TFFunnelForSequenceClassification''', '''TFFunnelForTokenClassification''', '''TFFunnelModel''', '''TFFunnelPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig from .tokenization_funnel import FunnelTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_funnel_fast import FunnelTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_funnel import ( FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST, FunnelBaseModel, FunnelForMaskedLM, FunnelForMultipleChoice, FunnelForPreTraining, FunnelForQuestionAnswering, FunnelForSequenceClassification, FunnelForTokenClassification, FunnelModel, FunnelPreTrainedModel, load_tf_weights_in_funnel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_funnel import ( TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST, TFFunnelBaseModel, TFFunnelForMaskedLM, TFFunnelForMultipleChoice, TFFunnelForPreTraining, TFFunnelForQuestionAnswering, TFFunnelForSequenceClassification, TFFunnelForTokenClassification, TFFunnelModel, TFFunnelPreTrainedModel, ) else: import sys lowerCamelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
79
from math import factorial UpperCAmelCase__ = {str(digit): factorial(digit) for digit in range(10)} def _a ( a :int ) -> int: if not isinstance(a , a ): raise TypeError('''Parameter number must be int''' ) if number < 0: raise ValueError('''Parameter number must be greater than or equal to 0''' ) # Converts number in string to iterate on its digits and adds its factorial. return sum(DIGIT_FACTORIAL[digit] for digit in str(a ) ) def _a ( a :int = 60 , a :int = 1_000_000 ) -> int: if not isinstance(a , a ) or not isinstance(a , a ): raise TypeError('''Parameters chain_length and number_limit must be int''' ) if chain_length <= 0 or number_limit <= 0: raise ValueError( '''Parameters chain_length and number_limit must be greater than 0''' ) # the counter for the chains with the exact desired length a = 0 # the cached sizes of the previous chains a = {} for start_chain_element in range(1 , a ): # The temporary set will contain the elements of the chain a = set() a = 0 # Stop computing the chain when you find a cached size, a repeating item or the # length is greater then the desired one. a = start_chain_element while ( chain_element not in chain_sets_lengths and chain_element not in chain_set and chain_set_length <= chain_length ): chain_set.add(a ) chain_set_length += 1 a = digit_factorial_sum(a ) if chain_element in chain_sets_lengths: chain_set_length += chain_sets_lengths[chain_element] a = chain_set_length # If chain contains the exact amount of elements increase the counter if chain_set_length == chain_length: chains_counter += 1 return chains_counter if __name__ == "__main__": import doctest doctest.testmod() print(f"""{solution()}""")
0
0
import numpy as np from transformers import BatchFeature from transformers.testing_utils import require_tf, require_torch from .test_feature_extraction_common import FeatureExtractionSavingTestMixin class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' UpperCamelCase_ : str = None UpperCamelCase_ : Dict = None @property def _A ( self : List[Any] ): return self.feat_extract_tester.prepare_feat_extract_dict() def _A ( self : Union[str, Any] ): SCREAMING_SNAKE_CASE : Any = self.feature_extraction_class(**self.feat_extract_dict ) self.assertTrue(hasattr(UpperCAmelCase_ , "feature_size" ) ) self.assertTrue(hasattr(UpperCAmelCase_ , "sampling_rate" ) ) self.assertTrue(hasattr(UpperCAmelCase_ , "padding_value" ) ) def _A ( self : List[str] ): SCREAMING_SNAKE_CASE : List[str] = self.feat_extract_tester.prepare_inputs_for_common() SCREAMING_SNAKE_CASE : Any = self.feature_extraction_class(**self.feat_extract_dict ) SCREAMING_SNAKE_CASE : str = feat_extract.model_input_names[0] SCREAMING_SNAKE_CASE : str = BatchFeature({input_name: speech_inputs} ) self.assertTrue(all(len(UpperCAmelCase_ ) == len(UpperCAmelCase_ ) for x, y in zip(UpperCAmelCase_ , processed_features[input_name] ) ) ) SCREAMING_SNAKE_CASE : List[Any] = self.feat_extract_tester.prepare_inputs_for_common(equal_length=UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : List[str] = BatchFeature({input_name: speech_inputs} , tensor_type="np" ) SCREAMING_SNAKE_CASE : Dict = processed_features[input_name] if len(batch_features_input.shape ) < 3: SCREAMING_SNAKE_CASE : Optional[int] = batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) ) @require_torch def _A ( self : Optional[int] ): SCREAMING_SNAKE_CASE : str = self.feat_extract_tester.prepare_inputs_for_common(equal_length=UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Any = self.feature_extraction_class(**self.feat_extract_dict ) SCREAMING_SNAKE_CASE : int = feat_extract.model_input_names[0] SCREAMING_SNAKE_CASE : Dict = BatchFeature({input_name: speech_inputs} , tensor_type="pt" ) SCREAMING_SNAKE_CASE : Optional[int] = processed_features[input_name] if len(batch_features_input.shape ) < 3: SCREAMING_SNAKE_CASE : List[Any] = batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) ) @require_tf def _A ( self : List[str] ): SCREAMING_SNAKE_CASE : Dict = self.feat_extract_tester.prepare_inputs_for_common(equal_length=UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Dict = self.feature_extraction_class(**self.feat_extract_dict ) SCREAMING_SNAKE_CASE : Optional[Any] = feat_extract.model_input_names[0] SCREAMING_SNAKE_CASE : List[str] = BatchFeature({input_name: speech_inputs} , tensor_type="tf" ) SCREAMING_SNAKE_CASE : Optional[Any] = processed_features[input_name] if len(batch_features_input.shape ) < 3: SCREAMING_SNAKE_CASE : Dict = batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) ) def _A ( self : int , UpperCAmelCase_ : Dict=False ): def _inputs_have_equal_length(UpperCAmelCase_ : Union[str, Any] ): SCREAMING_SNAKE_CASE : int = len(input[0] ) for input_slice in input[1:]: if len(UpperCAmelCase_ ) != length: return False return True def _inputs_are_equal(UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[Any] ): if len(UpperCAmelCase_ ) != len(UpperCAmelCase_ ): return False for input_slice_a, input_slice_a in zip(UpperCAmelCase_ , UpperCAmelCase_ ): if not np.allclose(np.asarray(UpperCAmelCase_ ) , np.asarray(UpperCAmelCase_ ) , atol=1E-3 ): return False return True SCREAMING_SNAKE_CASE : Dict = self.feature_extraction_class(**self.feat_extract_dict ) SCREAMING_SNAKE_CASE : Union[str, Any] = self.feat_extract_tester.prepare_inputs_for_common(numpify=UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Optional[int] = feat_extract.model_input_names[0] SCREAMING_SNAKE_CASE : int = BatchFeature({input_name: speech_inputs} ) SCREAMING_SNAKE_CASE : str = self.feat_extract_tester.seq_length_diff SCREAMING_SNAKE_CASE : Dict = self.feat_extract_tester.max_seq_length + pad_diff SCREAMING_SNAKE_CASE : Tuple = self.feat_extract_tester.min_seq_length SCREAMING_SNAKE_CASE : Optional[int] = self.feat_extract_tester.batch_size SCREAMING_SNAKE_CASE : Optional[Any] = self.feat_extract_tester.feature_size # test padding for List[int] + numpy SCREAMING_SNAKE_CASE : List[Any] = feat_extract.pad(UpperCAmelCase_ , padding=UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Optional[int] = input_a[input_name] SCREAMING_SNAKE_CASE : Optional[int] = feat_extract.pad(UpperCAmelCase_ , padding="longest" ) SCREAMING_SNAKE_CASE : Tuple = input_a[input_name] SCREAMING_SNAKE_CASE : Union[str, Any] = feat_extract.pad(UpperCAmelCase_ , padding="max_length" , max_length=len(speech_inputs[-1] ) ) SCREAMING_SNAKE_CASE : Optional[Any] = input_a[input_name] SCREAMING_SNAKE_CASE : Optional[int] = feat_extract.pad(UpperCAmelCase_ , padding="longest" , return_tensors="np" ) SCREAMING_SNAKE_CASE : Tuple = input_a[input_name] # max_length parameter has to be provided when setting `padding="max_length"` with self.assertRaises(UpperCAmelCase_ ): feat_extract.pad(UpperCAmelCase_ , padding="max_length" )[input_name] SCREAMING_SNAKE_CASE : Dict = feat_extract.pad( UpperCAmelCase_ , padding="max_length" , max_length=UpperCAmelCase_ , return_tensors="np" ) SCREAMING_SNAKE_CASE : Dict = input_a[input_name] self.assertFalse(_inputs_have_equal_length(UpperCAmelCase_ ) ) self.assertTrue(_inputs_have_equal_length(UpperCAmelCase_ ) ) self.assertTrue(_inputs_have_equal_length(UpperCAmelCase_ ) ) self.assertTrue(_inputs_are_equal(UpperCAmelCase_ , UpperCAmelCase_ ) ) self.assertTrue(len(input_a[0] ) == pad_min_length ) self.assertTrue(len(input_a[1] ) == pad_min_length + pad_diff ) self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0] )) ) self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length) ) if feature_size > 1: self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size ) # test padding for `pad_to_multiple_of` for List[int] + numpy SCREAMING_SNAKE_CASE : int = feat_extract.pad(UpperCAmelCase_ , pad_to_multiple_of=10 ) SCREAMING_SNAKE_CASE : str = input_a[input_name] SCREAMING_SNAKE_CASE : int = feat_extract.pad(UpperCAmelCase_ , padding="longest" , pad_to_multiple_of=10 ) SCREAMING_SNAKE_CASE : Union[str, Any] = input_a[input_name] SCREAMING_SNAKE_CASE : Optional[Any] = feat_extract.pad( UpperCAmelCase_ , padding="max_length" , pad_to_multiple_of=10 , max_length=UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Tuple = input_a[input_name] SCREAMING_SNAKE_CASE : List[str] = feat_extract.pad( UpperCAmelCase_ , padding="max_length" , pad_to_multiple_of=10 , max_length=UpperCAmelCase_ , return_tensors="np" , ) SCREAMING_SNAKE_CASE : Tuple = input_a[input_name] self.assertTrue(all(len(UpperCAmelCase_ ) % 10 == 0 for x in input_a ) ) self.assertTrue(_inputs_are_equal(UpperCAmelCase_ , UpperCAmelCase_ ) ) SCREAMING_SNAKE_CASE : List[str] = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10 self.assertTrue(all(len(UpperCAmelCase_ ) == expected_mult_pad_length for x in input_a ) ) self.assertEqual(input_a.shape[:2] , (batch_size, expected_mult_pad_length) ) if feature_size > 1: self.assertTrue(input_a.shape[2] == feature_size ) # Check padding value is correct SCREAMING_SNAKE_CASE : Tuple = (np.ones(self.feat_extract_tester.feature_size ) * feat_extract.padding_value).sum() self.assertTrue( abs(np.asarray(input_a[0] )[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1E-3 ) self.assertTrue( abs( np.asarray(input_a[1] )[pad_min_length + pad_diff :].sum() - padding_vector_sum * (pad_max_length - pad_min_length - pad_diff) ) < 1E-3 ) self.assertTrue( abs( np.asarray(input_a[2] )[pad_min_length + 2 * pad_diff :].sum() - padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff) ) < 1E-3 ) self.assertTrue( abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1E-3 ) self.assertTrue( abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length) ) < 1E-3 ) def _A ( self : Union[str, Any] , UpperCAmelCase_ : Optional[Any]=False ): def _inputs_have_equal_length(UpperCAmelCase_ : Union[str, Any] ): SCREAMING_SNAKE_CASE : int = len(input[0] ) for input_slice in input[1:]: if len(UpperCAmelCase_ ) != length: return False return True def _inputs_are_equal(UpperCAmelCase_ : str , UpperCAmelCase_ : int ): if len(UpperCAmelCase_ ) != len(UpperCAmelCase_ ): return False for input_slice_a, input_slice_a in zip(UpperCAmelCase_ , UpperCAmelCase_ ): if not np.allclose(np.asarray(UpperCAmelCase_ ) , np.asarray(UpperCAmelCase_ ) , atol=1E-3 ): return False return True SCREAMING_SNAKE_CASE : Any = self.feature_extraction_class(**self.feat_extract_dict ) SCREAMING_SNAKE_CASE : List[Any] = self.feat_extract_tester.prepare_inputs_for_common(numpify=UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Optional[Any] = feat_extract.model_input_names[0] SCREAMING_SNAKE_CASE : Tuple = BatchFeature({input_name: speech_inputs} ) # truncate to smallest SCREAMING_SNAKE_CASE : int = feat_extract.pad( UpperCAmelCase_ , padding="max_length" , max_length=len(speech_inputs[0] ) , truncation=UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Any = input_a[input_name] SCREAMING_SNAKE_CASE : List[Any] = feat_extract.pad(UpperCAmelCase_ , padding="max_length" , max_length=len(speech_inputs[0] ) ) SCREAMING_SNAKE_CASE : Tuple = input_a[input_name] self.assertTrue(_inputs_have_equal_length(UpperCAmelCase_ ) ) self.assertFalse(_inputs_have_equal_length(UpperCAmelCase_ ) ) # truncate to smallest with np SCREAMING_SNAKE_CASE : List[Any] = feat_extract.pad( UpperCAmelCase_ , padding="max_length" , max_length=len(speech_inputs[0] ) , return_tensors="np" , truncation=UpperCAmelCase_ , ) SCREAMING_SNAKE_CASE : Dict = input_a[input_name] SCREAMING_SNAKE_CASE : Tuple = feat_extract.pad( UpperCAmelCase_ , padding="max_length" , max_length=len(speech_inputs[0] ) , return_tensors="np" ) SCREAMING_SNAKE_CASE : List[Any] = input_a[input_name] self.assertTrue(_inputs_have_equal_length(UpperCAmelCase_ ) ) self.assertTrue(input_a.shape[1] == len(speech_inputs[0] ) ) # since truncation forces padding to be smaller than longest input # function can't return `np.ndarray`, but has to return list self.assertFalse(_inputs_have_equal_length(UpperCAmelCase_ ) ) # truncate to middle SCREAMING_SNAKE_CASE : Optional[Any] = feat_extract.pad( UpperCAmelCase_ , padding="max_length" , max_length=len(speech_inputs[1] ) , truncation=UpperCAmelCase_ , return_tensors="np" , ) SCREAMING_SNAKE_CASE : List[str] = input_a[input_name] SCREAMING_SNAKE_CASE : Dict = feat_extract.pad( UpperCAmelCase_ , padding="max_length" , max_length=len(speech_inputs[1] ) , truncation=UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : List[str] = input_a[input_name] SCREAMING_SNAKE_CASE : str = feat_extract.pad( UpperCAmelCase_ , padding="max_length" , max_length=len(speech_inputs[1] ) , return_tensors="np" ) SCREAMING_SNAKE_CASE : int = input_a[input_name] self.assertTrue(input_a.shape[1] == len(speech_inputs[1] ) ) self.assertTrue(_inputs_have_equal_length(UpperCAmelCase_ ) ) self.assertTrue(_inputs_have_equal_length(UpperCAmelCase_ ) ) self.assertTrue(_inputs_are_equal(UpperCAmelCase_ , UpperCAmelCase_ ) ) # since truncation forces padding to be smaller than longest input # function can't return `np.ndarray`, but has to return list self.assertFalse(_inputs_have_equal_length(UpperCAmelCase_ ) ) self.assertTrue(len(input_a[-1] ) == len(speech_inputs[-1] ) ) # padding has to be max_length when setting `truncation=True` with self.assertRaises(UpperCAmelCase_ ): feat_extract.pad(UpperCAmelCase_ , truncation=UpperCAmelCase_ )[input_name] # padding has to be max_length when setting `truncation=True` with self.assertRaises(UpperCAmelCase_ ): feat_extract.pad(UpperCAmelCase_ , padding="longest" , truncation=UpperCAmelCase_ )[input_name] # padding has to be max_length when setting `truncation=True` with self.assertRaises(UpperCAmelCase_ ): feat_extract.pad(UpperCAmelCase_ , padding="longest" , truncation=UpperCAmelCase_ )[input_name] # max_length parameter has to be provided when setting `truncation=True` and padding="max_length" with self.assertRaises(UpperCAmelCase_ ): feat_extract.pad(UpperCAmelCase_ , padding="max_length" , truncation=UpperCAmelCase_ )[input_name] # test truncation for `pad_to_multiple_of` for List[int] + numpy SCREAMING_SNAKE_CASE : List[Any] = 12 SCREAMING_SNAKE_CASE : Optional[Any] = feat_extract.pad( UpperCAmelCase_ , padding="max_length" , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=UpperCAmelCase_ , truncation=UpperCAmelCase_ , ) SCREAMING_SNAKE_CASE : Union[str, Any] = input_a[input_name] SCREAMING_SNAKE_CASE : str = feat_extract.pad( UpperCAmelCase_ , padding="max_length" , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=UpperCAmelCase_ , ) SCREAMING_SNAKE_CASE : Optional[Any] = input_a[input_name] # retrieve expected_length as multiple of pad_to_multiple_of SCREAMING_SNAKE_CASE : Tuple = len(speech_inputs[0] ) if expected_length % pad_to_multiple_of != 0: SCREAMING_SNAKE_CASE : List[Any] = ((len(speech_inputs[0] ) // pad_to_multiple_of) + 1) * pad_to_multiple_of self.assertTrue(len(input_a[0] ) == expected_length ) self.assertTrue(_inputs_have_equal_length(UpperCAmelCase_ ) ) self.assertFalse(_inputs_have_equal_length(UpperCAmelCase_ ) ) def _A ( self : str ): self._check_padding(numpify=UpperCAmelCase_ ) def _A ( self : Tuple ): self._check_padding(numpify=UpperCAmelCase_ ) def _A ( self : int ): self._check_truncation(numpify=UpperCAmelCase_ ) def _A ( self : List[str] ): self._check_truncation(numpify=UpperCAmelCase_ ) @require_torch def _A ( self : Any ): SCREAMING_SNAKE_CASE : Tuple = self.feature_extraction_class(**self.feat_extract_dict ) SCREAMING_SNAKE_CASE : Any = self.feat_extract_tester.prepare_inputs_for_common() SCREAMING_SNAKE_CASE : Union[str, Any] = feat_extract.model_input_names[0] SCREAMING_SNAKE_CASE : List[Any] = BatchFeature({input_name: speech_inputs} ) SCREAMING_SNAKE_CASE : Any = feat_extract.pad(UpperCAmelCase_ , padding="longest" , return_tensors="np" )[input_name] SCREAMING_SNAKE_CASE : Optional[Any] = feat_extract.pad(UpperCAmelCase_ , padding="longest" , return_tensors="pt" )[input_name] self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1E-2 ) @require_tf def _A ( self : int ): SCREAMING_SNAKE_CASE : int = self.feature_extraction_class(**self.feat_extract_dict ) SCREAMING_SNAKE_CASE : Union[str, Any] = self.feat_extract_tester.prepare_inputs_for_common() SCREAMING_SNAKE_CASE : Dict = feat_extract.model_input_names[0] SCREAMING_SNAKE_CASE : Optional[Any] = BatchFeature({input_name: speech_inputs} ) SCREAMING_SNAKE_CASE : List[str] = feat_extract.pad(UpperCAmelCase_ , padding="longest" , return_tensors="np" )[input_name] SCREAMING_SNAKE_CASE : List[Any] = feat_extract.pad(UpperCAmelCase_ , padding="longest" , return_tensors="tf" )[input_name] self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_tf.numpy().astype(np.floataa ).sum() ) < 1E-2 ) def _A ( self : Optional[int] ): SCREAMING_SNAKE_CASE : List[Any] = self.feat_extract_dict SCREAMING_SNAKE_CASE : Union[str, Any] = True SCREAMING_SNAKE_CASE : str = self.feature_extraction_class(**UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Tuple = self.feat_extract_tester.prepare_inputs_for_common() SCREAMING_SNAKE_CASE : int = [len(UpperCAmelCase_ ) for x in speech_inputs] SCREAMING_SNAKE_CASE : Any = feat_extract.model_input_names[0] SCREAMING_SNAKE_CASE : Any = BatchFeature({input_name: speech_inputs} ) SCREAMING_SNAKE_CASE : int = feat_extract.pad(UpperCAmelCase_ , padding="longest" , return_tensors="np" ) self.assertIn("attention_mask" , UpperCAmelCase_ ) self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) ) self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , UpperCAmelCase_ ) def _A ( self : str ): SCREAMING_SNAKE_CASE : List[Any] = self.feat_extract_dict SCREAMING_SNAKE_CASE : Optional[Any] = True SCREAMING_SNAKE_CASE : Tuple = self.feature_extraction_class(**UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Tuple = self.feat_extract_tester.prepare_inputs_for_common() SCREAMING_SNAKE_CASE : Dict = [len(UpperCAmelCase_ ) for x in speech_inputs] SCREAMING_SNAKE_CASE : Tuple = feat_extract.model_input_names[0] SCREAMING_SNAKE_CASE : Optional[int] = BatchFeature({input_name: speech_inputs} ) SCREAMING_SNAKE_CASE : Any = min(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : List[str] = feat_extract.pad( UpperCAmelCase_ , padding="max_length" , max_length=UpperCAmelCase_ , truncation=UpperCAmelCase_ , return_tensors="np" ) self.assertIn("attention_mask" , UpperCAmelCase_ ) self.assertListEqual( list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] ) self.assertListEqual( processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
319
def lowerCamelCase__ ( ): """simple docstring""" return [list(range(1000 - i , -1000 - i , -1 ) ) for i in range(1000 )] snake_case = generate_large_matrix() snake_case = ( [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]], [[3, 2], [1, 0]], [[7, 7, 6]], [[7, 7, 6], [-1, -2, -3]], grid, ) def lowerCamelCase__ ( lowercase ): """simple docstring""" assert all(row == sorted(lowercase , reverse=lowercase ) for row in grid ) assert all(list(lowercase ) == sorted(lowercase , reverse=lowercase ) for col in zip(*lowercase ) ) def lowerCamelCase__ ( lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : int = 0 SCREAMING_SNAKE_CASE : Optional[Any] = len(lowercase ) - 1 # Edge cases such as no values or all numbers are negative. if not array or array[0] < 0: return 0 while right + 1 > left: SCREAMING_SNAKE_CASE : List[Any] = (left + right) // 2 SCREAMING_SNAKE_CASE : Optional[int] = array[mid] # Num must be negative and the index must be greater than or equal to 0. if num < 0 and array[mid - 1] >= 0: return mid if num >= 0: SCREAMING_SNAKE_CASE : List[Any] = mid + 1 else: SCREAMING_SNAKE_CASE : Dict = mid - 1 # No negative numbers so return the last index of the array + 1 which is the length. return len(lowercase ) def lowerCamelCase__ ( lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = 0 SCREAMING_SNAKE_CASE : List[str] = len(grid[0] ) for i in range(len(lowercase ) ): SCREAMING_SNAKE_CASE : Any = find_negative_index(grid[i][:bound] ) total += bound return (len(lowercase ) * len(grid[0] )) - total def lowerCamelCase__ ( lowercase ): """simple docstring""" return len([number for row in grid for number in row if number < 0] ) def lowerCamelCase__ ( lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : Any = 0 for row in grid: for i, number in enumerate(lowercase ): if number < 0: total += len(lowercase ) - i break return total def lowerCamelCase__ ( ): """simple docstring""" from timeit import timeit print("Running benchmarks" ) SCREAMING_SNAKE_CASE : List[str] = ( "from __main__ import count_negatives_binary_search, " "count_negatives_brute_force, count_negatives_brute_force_with_break, grid" ) for func in ( "count_negatives_binary_search", # took 0.7727 seconds "count_negatives_brute_force_with_break", # took 4.6505 seconds "count_negatives_brute_force", # took 12.8160 seconds ): SCREAMING_SNAKE_CASE : Union[str, Any] = timeit(F'''{func}(grid=grid)''' , setup=lowercase , number=500 ) print(F'''{func}() took {time:0.4f} seconds''' ) if __name__ == "__main__": import doctest doctest.testmod() benchmark()
319
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) __lowerCAmelCase : Optional[Any] ={ 'configuration_swiftformer': [ 'SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'SwiftFormerConfig', 'SwiftFormerOnnxConfig', ] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase : Dict =[ 'SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'SwiftFormerForImageClassification', 'SwiftFormerModel', 'SwiftFormerPreTrainedModel', ] if TYPE_CHECKING: from .configuration_swiftformer import ( SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, SwiftFormerConfig, SwiftFormerOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_swiftformer import ( SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, SwiftFormerForImageClassification, SwiftFormerModel, SwiftFormerPreTrainedModel, ) else: import sys __lowerCAmelCase : str =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
9
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import VivitImageProcessor class _lowercase ( unittest.TestCase ): '''simple docstring''' def __init__( self :Any , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Tuple=7 , lowerCAmelCase__ :List[Any]=3 , lowerCAmelCase__ :Any=10 , lowerCAmelCase__ :Optional[int]=18 , lowerCAmelCase__ :Dict=30 , lowerCAmelCase__ :Tuple=400 , lowerCAmelCase__ :List[Any]=True , lowerCAmelCase__ :Tuple=None , lowerCAmelCase__ :str=True , lowerCAmelCase__ :List[str]=[0.5, 0.5, 0.5] , lowerCAmelCase__ :List[str]=[0.5, 0.5, 0.5] , lowerCAmelCase__ :Optional[Any]=None , ) -> Optional[Any]: __SCREAMING_SNAKE_CASE : Dict = size if size is not None else {'''shortest_edge''': 18} __SCREAMING_SNAKE_CASE : Optional[int] = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18} __SCREAMING_SNAKE_CASE : Tuple = parent __SCREAMING_SNAKE_CASE : List[Any] = batch_size __SCREAMING_SNAKE_CASE : List[str] = num_channels __SCREAMING_SNAKE_CASE : Union[str, Any] = num_frames __SCREAMING_SNAKE_CASE : Tuple = image_size __SCREAMING_SNAKE_CASE : Optional[Any] = min_resolution __SCREAMING_SNAKE_CASE : Any = max_resolution __SCREAMING_SNAKE_CASE : List[Any] = do_resize __SCREAMING_SNAKE_CASE : Optional[Any] = size __SCREAMING_SNAKE_CASE : Optional[int] = do_normalize __SCREAMING_SNAKE_CASE : List[Any] = image_mean __SCREAMING_SNAKE_CASE : List[str] = image_std __SCREAMING_SNAKE_CASE : str = crop_size def __magic_name__( self :Tuple ) -> Any: return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, "crop_size": self.crop_size, } @require_torch @require_vision class _lowercase ( A__ , unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : List[str] = VivitImageProcessor if is_vision_available() else None def __magic_name__( self :List[str] ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE : str = VivitImageProcessingTester(self ) @property def __magic_name__( self :int ) -> Union[str, Any]: return self.image_processor_tester.prepare_image_processor_dict() def __magic_name__( self :List[str] ) -> Optional[int]: __SCREAMING_SNAKE_CASE : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowerCAmelCase__ , '''image_mean''' ) ) self.assertTrue(hasattr(lowerCAmelCase__ , '''image_std''' ) ) self.assertTrue(hasattr(lowerCAmelCase__ , '''do_normalize''' ) ) self.assertTrue(hasattr(lowerCAmelCase__ , '''do_resize''' ) ) self.assertTrue(hasattr(lowerCAmelCase__ , '''do_center_crop''' ) ) self.assertTrue(hasattr(lowerCAmelCase__ , '''size''' ) ) def __magic_name__( self :Optional[Any] ) -> Optional[int]: __SCREAMING_SNAKE_CASE : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''shortest_edge''': 18} ) self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} ) __SCREAMING_SNAKE_CASE : Tuple = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {'''shortest_edge''': 42} ) self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} ) def __magic_name__( self :List[Any] ) -> Union[str, Any]: # Initialize image_processing __SCREAMING_SNAKE_CASE : Dict = self.image_processing_class(**self.image_processor_dict ) # create random PIL videos __SCREAMING_SNAKE_CASE : List[Any] = prepare_video_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ ) for video in video_inputs: self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ ) self.assertIsInstance(video[0] , Image.Image ) # Test not batched input __SCREAMING_SNAKE_CASE : List[str] = image_processing(video_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_videos.shape , ( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __SCREAMING_SNAKE_CASE : Optional[Any] = image_processing(lowerCAmelCase__ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_videos.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def __magic_name__( self :str ) -> int: # Initialize image_processing __SCREAMING_SNAKE_CASE : Any = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __SCREAMING_SNAKE_CASE : List[str] = prepare_video_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , numpify=lowerCAmelCase__ ) for video in video_inputs: self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ ) self.assertIsInstance(video[0] , np.ndarray ) # Test not batched input __SCREAMING_SNAKE_CASE : Optional[int] = image_processing(video_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_videos.shape , ( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __SCREAMING_SNAKE_CASE : Any = image_processing(lowerCAmelCase__ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_videos.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def __magic_name__( self :Any ) -> List[str]: # Initialize image_processing __SCREAMING_SNAKE_CASE : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __SCREAMING_SNAKE_CASE : Optional[int] = prepare_video_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , torchify=lowerCAmelCase__ ) for video in video_inputs: self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ ) self.assertIsInstance(video[0] , torch.Tensor ) # Test not batched input __SCREAMING_SNAKE_CASE : Tuple = image_processing(video_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_videos.shape , ( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __SCREAMING_SNAKE_CASE : Optional[int] = image_processing(lowerCAmelCase__ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_videos.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , )
9
1
import json import os from functools import lru_cache from typing import List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging snake_case : int = logging.get_logger(__name__) snake_case : Tuple = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""} # See all BART models at https://huggingface.co/models?filter=bart snake_case : Union[str, Any] = { """vocab_file""": { """facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/vocab.json""", """facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/vocab.json""", """facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json""", """facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json""", """facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json""", """yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json""", }, """merges_file""": { """facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/merges.txt""", """facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/merges.txt""", """facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt""", """facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt""", """facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt""", """yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt""", }, } snake_case : Dict = { """facebook/bart-base""": 1_024, """facebook/bart-large""": 1_024, """facebook/bart-large-mnli""": 1_024, """facebook/bart-large-cnn""": 1_024, """facebook/bart-large-xsum""": 1_024, """yjernite/bart_eli5""": 1_024, } @lru_cache() def lowerCAmelCase_ ( ) -> Any: '''simple docstring''' __magic_name__ : List[str] = ( list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) ) ) __magic_name__ : Optional[int] = bs[:] __magic_name__ : str = 0 for b in range(2**8 ): if b not in bs: bs.append(__snake_case ) cs.append(2**8 + n ) n += 1 __magic_name__ : int = [chr(__snake_case ) for n in cs] return dict(zip(__snake_case , __snake_case ) ) def lowerCAmelCase_ ( _snake_case : Dict ) -> Union[str, Any]: '''simple docstring''' __magic_name__ : str = set() __magic_name__ : Union[str, Any] = word[0] for char in word[1:]: pairs.add((prev_char, char) ) __magic_name__ : Dict = char return pairs class _snake_case ( lowerCamelCase__ ): UpperCamelCase__ = VOCAB_FILES_NAMES UpperCamelCase__ = PRETRAINED_VOCAB_FILES_MAP UpperCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase__ = ['input_ids', 'attention_mask'] def __init__( self , _a , _a , _a="replace" , _a="<s>" , _a="</s>" , _a="</s>" , _a="<s>" , _a="<unk>" , _a="<pad>" , _a="<mask>" , _a=False , **_a , ): __magic_name__ : Tuple = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else bos_token __magic_name__ : Tuple = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else eos_token __magic_name__ : int = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else sep_token __magic_name__ : List[str] = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else cls_token __magic_name__ : Union[str, Any] = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else unk_token __magic_name__ : List[Any] = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else pad_token # Mask token behave like a normal word, i.e. include the space before it __magic_name__ : str = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else mask_token super().__init__( errors=_a , bos_token=_a , eos_token=_a , unk_token=_a , sep_token=_a , cls_token=_a , pad_token=_a , mask_token=_a , add_prefix_space=_a , **_a , ) with open(_a , encoding="utf-8" ) as vocab_handle: __magic_name__ : Tuple = json.load(_a ) __magic_name__ : Union[str, Any] = {v: k for k, v in self.encoder.items()} __magic_name__ : str = errors # how to handle errors in decoding __magic_name__ : Tuple = bytes_to_unicode() __magic_name__ : Union[str, Any] = {v: k for k, v in self.byte_encoder.items()} with open(_a , encoding="utf-8" ) as merges_handle: __magic_name__ : str = merges_handle.read().split("\n" )[1:-1] __magic_name__ : List[str] = [tuple(merge.split() ) for merge in bpe_merges] __magic_name__ : List[Any] = dict(zip(_a , range(len(_a ) ) ) ) __magic_name__ : Optional[int] = {} __magic_name__ : Dict = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions __magic_name__ : Optional[int] = re.compile(r"\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" ) @property def SCREAMING_SNAKE_CASE ( self ): return len(self.encoder ) def SCREAMING_SNAKE_CASE ( self ): return dict(self.encoder , **self.added_tokens_encoder ) def SCREAMING_SNAKE_CASE ( self , _a ): if token in self.cache: return self.cache[token] __magic_name__ : Any = tuple(_a ) __magic_name__ : List[str] = get_pairs(_a ) if not pairs: return token while True: __magic_name__ : Tuple = min(_a , key=lambda _a : self.bpe_ranks.get(_a , float("inf" ) ) ) if bigram not in self.bpe_ranks: break __magic_name__ , __magic_name__ : int = bigram __magic_name__ : List[str] = [] __magic_name__ : Optional[int] = 0 while i < len(_a ): try: __magic_name__ : Tuple = word.index(_a , _a ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) __magic_name__ : str = j if word[i] == first and i < len(_a ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 __magic_name__ : str = tuple(_a ) __magic_name__ : List[Any] = new_word if len(_a ) == 1: break else: __magic_name__ : Tuple = get_pairs(_a ) __magic_name__ : Union[str, Any] = " ".join(_a ) __magic_name__ : List[Any] = word return word def SCREAMING_SNAKE_CASE ( self , _a ): __magic_name__ : List[str] = [] for token in re.findall(self.pat , _a ): __magic_name__ : List[Any] = "".join( self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_a ).split(" " ) ) return bpe_tokens def SCREAMING_SNAKE_CASE ( self , _a ): return self.encoder.get(_a , self.encoder.get(self.unk_token ) ) def SCREAMING_SNAKE_CASE ( self , _a ): return self.decoder.get(_a ) def SCREAMING_SNAKE_CASE ( self , _a ): __magic_name__ : int = "".join(_a ) __magic_name__ : Any = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors ) return text def SCREAMING_SNAKE_CASE ( self , _a , _a = None ): if not os.path.isdir(_a ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return __magic_name__ : str = os.path.join( _a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) __magic_name__ : List[str] = os.path.join( _a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] ) with open(_a , "w" , encoding="utf-8" ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=_a , ensure_ascii=_a ) + "\n" ) __magic_name__ : Dict = 0 with open(_a , "w" , encoding="utf-8" ) as writer: writer.write("#version: 0.2\n" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda _a : kv[1] ): if index != token_index: logger.warning( f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.''' " Please check that the tokenizer is not corrupted!" ) __magic_name__ : List[str] = token_index writer.write(" ".join(_a ) + "\n" ) index += 1 return vocab_file, merge_file def SCREAMING_SNAKE_CASE ( self , _a , _a = None ): if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] __magic_name__ : Dict = [self.cls_token_id] __magic_name__ : int = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def SCREAMING_SNAKE_CASE ( self , _a , _a = None , _a = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_a , token_ids_a=_a , already_has_special_tokens=_a ) if token_ids_a is None: return [1] + ([0] * len(_a )) + [1] return [1] + ([0] * len(_a )) + [1, 1] + ([0] * len(_a )) + [1] def SCREAMING_SNAKE_CASE ( self , _a , _a = None ): __magic_name__ : Union[str, Any] = [self.sep_token_id] __magic_name__ : List[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def SCREAMING_SNAKE_CASE ( self , _a , _a=False , **_a ): __magic_name__ : str = kwargs.pop("add_prefix_space" , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(_a ) > 0 and not text[0].isspace()): __magic_name__ : int = " " + text return (text, kwargs)
356
import argparse import json import re from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileNetVaConfig, MobileNetVaForImageClassification, MobileNetVaImageProcessor, load_tf_weights_in_mobilenet_va, ) from transformers.utils import logging logging.set_verbosity_info() snake_case : int = logging.get_logger(__name__) def lowerCAmelCase_ ( _snake_case : str ) -> Optional[Any]: '''simple docstring''' __magic_name__ : Optional[int] = MobileNetVaConfig(layer_norm_eps=0.001 ) if "_quant" in model_name: raise ValueError("Quantized models are not supported." ) __magic_name__ : int = re.match(R"^mobilenet_v1_([^_]*)_([^_]*)$" , _snake_case ) if matches: __magic_name__ : List[str] = float(matches[1] ) __magic_name__ : Dict = int(matches[2] ) # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of # the usual 1000. The first class (index 0) is "background". __magic_name__ : List[str] = 1001 __magic_name__ : Tuple = "imagenet-1k-id2label.json" __magic_name__ : Union[str, Any] = "huggingface/label-files" __magic_name__ : str = json.load(open(hf_hub_download(_snake_case , _snake_case , repo_type="dataset" ) , "r" ) ) __magic_name__ : Tuple = {int(_snake_case ) + 1: v for k, v in idalabel.items()} __magic_name__ : Dict = "background" __magic_name__ : str = idalabel __magic_name__ : List[Any] = {v: k for k, v in idalabel.items()} return config def lowerCAmelCase_ ( ) -> str: '''simple docstring''' __magic_name__ : Optional[int] = "http://images.cocodataset.org/val2017/000000039769.jpg" __magic_name__ : Any = Image.open(requests.get(_snake_case , stream=_snake_case ).raw ) return im @torch.no_grad() def lowerCAmelCase_ ( _snake_case : str , _snake_case : Optional[int] , _snake_case : Any , _snake_case : int=False ) -> Optional[int]: '''simple docstring''' __magic_name__ : int = get_mobilenet_va_config(_snake_case ) # Load 🤗 model __magic_name__ : List[Any] = MobileNetVaForImageClassification(_snake_case ).eval() # Load weights from TensorFlow checkpoint load_tf_weights_in_mobilenet_va(_snake_case , _snake_case , _snake_case ) # Check outputs on an image, prepared by MobileNetV1ImageProcessor __magic_name__ : Dict = MobileNetVaImageProcessor( crop_size={"width": config.image_size, "height": config.image_size} , size={"shortest_edge": config.image_size + 32} , ) __magic_name__ : int = image_processor(images=prepare_img() , return_tensors="pt" ) __magic_name__ : Any = model(**_snake_case ) __magic_name__ : Dict = outputs.logits assert logits.shape == (1, 1001) if model_name == "mobilenet_v1_1.0_224": __magic_name__ : Tuple = torch.tensor([-4.1_739, -1.1_233, 3.1_205] ) elif model_name == "mobilenet_v1_0.75_192": __magic_name__ : Optional[Any] = torch.tensor([-3.9_440, -2.3_141, -0.3_333] ) else: __magic_name__ : str = None if expected_logits is not None: assert torch.allclose(logits[0, :3] , _snake_case , atol=1E-4 ) Path(_snake_case ).mkdir(exist_ok=_snake_case ) print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(_snake_case ) print(F'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(_snake_case ) if push_to_hub: print("Pushing to the hub..." ) __magic_name__ : List[str] = "google/" + model_name image_processor.push_to_hub(_snake_case ) model.push_to_hub(_snake_case ) if __name__ == "__main__": snake_case : List[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_name", default="mobilenet_v1_1.0_224", type=str, help="Name of the MobileNetV1 model you'd like to convert. Should in the form 'mobilenet_v1_<depth>_<size>'.", ) parser.add_argument( "--checkpoint_path", required=True, type=str, help="Path to the original TensorFlow checkpoint (.ckpt file)." ) parser.add_argument( "--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." ) snake_case : str = parser.parse_args() convert_movilevit_checkpoint( args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub )
41
0
"""simple docstring""" import argparse import os import re import tensorflow as tf import torch from transformers import BertConfig, BertModel from transformers.utils import logging logging.set_verbosity_info() __UpperCAmelCase = logging.get_logger(__name__) def _snake_case ( lowercase__ : Union[str, Any] , lowercase__ : Optional[int] , lowercase__ : Optional[Any] ) -> Optional[int]: '''simple docstring''' lowerCAmelCase_ :Dict = os.path.abspath(lowercase__ ) logger.info(f"""Converting TensorFlow checkpoint from {tf_path}""" ) # Load weights from TF model lowerCAmelCase_ :Any = tf.train.list_variables(lowercase__ ) lowerCAmelCase_ :List[str] = [] lowerCAmelCase_ :str = [] lowerCAmelCase_ :int = [] for full_name, shape in init_vars: # logger.info(f"Loading TF weight {name} with shape {shape}") lowerCAmelCase_ :Union[str, Any] = full_name.split("""/""" ) if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]: logger.info(f"""Skipping non-model layer {full_name}""" ) continue if "optimizer" in full_name: logger.info(f"""Skipping optimization layer {full_name}""" ) continue if name[0] == "model": # ignore initial 'model' lowerCAmelCase_ :Dict = name[1:] # figure out how many levels deep the name is lowerCAmelCase_ :Tuple = 0 for _name in name: if _name.startswith("""layer_with_weights""" ): depth += 1 else: break layer_depth.append(lowercase__ ) # read data lowerCAmelCase_ :Optional[Any] = tf.train.load_variable(lowercase__ , lowercase__ ) names.append("""/""".join(lowercase__ ) ) arrays.append(lowercase__ ) logger.info(f"""Read a total of {len(lowercase__ ):,} layers""" ) # Sanity check if len(set(lowercase__ ) ) != 1: raise ValueError(f"""Found layer names with different depths (layer depth {list(set(lowercase__ ) )})""" ) lowerCAmelCase_ :Dict = list(set(lowercase__ ) )[0] if layer_depth != 1: raise ValueError( """The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP""" """ heads.""" ) # convert layers logger.info("""Converting weights...""" ) for full_name, array in zip(lowercase__ , lowercase__ ): lowerCAmelCase_ :str = full_name.split("""/""" ) lowerCAmelCase_ :Union[str, Any] = model lowerCAmelCase_ :Tuple = [] for i, m_name in enumerate(lowercase__ ): if m_name == ".ATTRIBUTES": # variable names end with .ATTRIBUTES/VARIABLE_VALUE break if m_name.startswith("""layer_with_weights""" ): lowerCAmelCase_ :Dict = int(m_name.split("""-""" )[-1] ) if layer_num <= 2: # embedding layers # layer_num 0: word_embeddings # layer_num 1: position_embeddings # layer_num 2: token_type_embeddings continue elif layer_num == 3: # embedding LayerNorm trace.extend(["""embeddings""", """LayerNorm"""] ) lowerCAmelCase_ :Dict = getattr(lowercase__ , """embeddings""" ) lowerCAmelCase_ :List[str] = getattr(lowercase__ , """LayerNorm""" ) elif layer_num > 3 and layer_num < config.num_hidden_layers + 4: # encoder layers trace.extend(["""encoder""", """layer""", str(layer_num - 4 )] ) lowerCAmelCase_ :Optional[Any] = getattr(lowercase__ , """encoder""" ) lowerCAmelCase_ :List[str] = getattr(lowercase__ , """layer""" ) lowerCAmelCase_ :Tuple = pointer[layer_num - 4] elif layer_num == config.num_hidden_layers + 4: # pooler layer trace.extend(["""pooler""", """dense"""] ) lowerCAmelCase_ :Tuple = getattr(lowercase__ , """pooler""" ) lowerCAmelCase_ :List[Any] = getattr(lowercase__ , """dense""" ) elif m_name == "embeddings": trace.append("""embeddings""" ) lowerCAmelCase_ :Dict = getattr(lowercase__ , """embeddings""" ) if layer_num == 0: trace.append("""word_embeddings""" ) lowerCAmelCase_ :Any = getattr(lowercase__ , """word_embeddings""" ) elif layer_num == 1: trace.append("""position_embeddings""" ) lowerCAmelCase_ :int = getattr(lowercase__ , """position_embeddings""" ) elif layer_num == 2: trace.append("""token_type_embeddings""" ) lowerCAmelCase_ :str = getattr(lowercase__ , """token_type_embeddings""" ) else: raise ValueError(f"""Unknown embedding layer with name {full_name}""" ) trace.append("""weight""" ) lowerCAmelCase_ :Tuple = getattr(lowercase__ , """weight""" ) elif m_name == "_attention_layer": # self-attention layer trace.extend(["""attention""", """self"""] ) lowerCAmelCase_ :str = getattr(lowercase__ , """attention""" ) lowerCAmelCase_ :Optional[int] = getattr(lowercase__ , """self""" ) elif m_name == "_attention_layer_norm": # output attention norm trace.extend(["""attention""", """output""", """LayerNorm"""] ) lowerCAmelCase_ :str = getattr(lowercase__ , """attention""" ) lowerCAmelCase_ :Any = getattr(lowercase__ , """output""" ) lowerCAmelCase_ :str = getattr(lowercase__ , """LayerNorm""" ) elif m_name == "_attention_output_dense": # output attention dense trace.extend(["""attention""", """output""", """dense"""] ) lowerCAmelCase_ :Any = getattr(lowercase__ , """attention""" ) lowerCAmelCase_ :List[Any] = getattr(lowercase__ , """output""" ) lowerCAmelCase_ :List[Any] = getattr(lowercase__ , """dense""" ) elif m_name == "_output_dense": # output dense trace.extend(["""output""", """dense"""] ) lowerCAmelCase_ :Any = getattr(lowercase__ , """output""" ) lowerCAmelCase_ :Optional[int] = getattr(lowercase__ , """dense""" ) elif m_name == "_output_layer_norm": # output dense trace.extend(["""output""", """LayerNorm"""] ) lowerCAmelCase_ :Optional[int] = getattr(lowercase__ , """output""" ) lowerCAmelCase_ :Any = getattr(lowercase__ , """LayerNorm""" ) elif m_name == "_key_dense": # attention key trace.append("""key""" ) lowerCAmelCase_ :Union[str, Any] = getattr(lowercase__ , """key""" ) elif m_name == "_query_dense": # attention query trace.append("""query""" ) lowerCAmelCase_ :Union[str, Any] = getattr(lowercase__ , """query""" ) elif m_name == "_value_dense": # attention value trace.append("""value""" ) lowerCAmelCase_ :List[str] = getattr(lowercase__ , """value""" ) elif m_name == "_intermediate_dense": # attention intermediate dense trace.extend(["""intermediate""", """dense"""] ) lowerCAmelCase_ :Optional[Any] = getattr(lowercase__ , """intermediate""" ) lowerCAmelCase_ :Optional[int] = getattr(lowercase__ , """dense""" ) elif m_name == "_output_layer_norm": # output layer norm trace.append("""output""" ) lowerCAmelCase_ :str = getattr(lowercase__ , """output""" ) # weights & biases elif m_name in ["bias", "beta"]: trace.append("""bias""" ) lowerCAmelCase_ :Union[str, Any] = getattr(lowercase__ , """bias""" ) elif m_name in ["kernel", "gamma"]: trace.append("""weight""" ) lowerCAmelCase_ :str = getattr(lowercase__ , """weight""" ) else: logger.warning(f"""Ignored {m_name}""" ) # for certain layers reshape is necessary lowerCAmelCase_ :Dict = """.""".join(lowercase__ ) if re.match(r"""(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)""" , lowercase__ ) or re.match( r"""(\S+)\.attention\.output\.dense\.weight""" , lowercase__ ): lowerCAmelCase_ :Tuple = array.reshape(pointer.data.shape ) if "kernel" in full_name: lowerCAmelCase_ :Optional[Any] = array.transpose() if pointer.shape == array.shape: lowerCAmelCase_ :List[str] = torch.from_numpy(lowercase__ ) else: raise ValueError( f"""Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:""" f""" {array.shape}""" ) logger.info(f"""Successfully set variable {full_name} to PyTorch layer {trace}""" ) return model def _snake_case ( lowercase__ : str , lowercase__ : Optional[Any] , lowercase__ : Union[str, Any] ) -> List[str]: '''simple docstring''' logger.info(f"""Loading model based on config from {config_path}...""" ) lowerCAmelCase_ :Optional[int] = BertConfig.from_json_file(lowercase__ ) lowerCAmelCase_ :Optional[Any] = BertModel(lowercase__ ) # Load weights from checkpoint logger.info(f"""Loading weights from checkpoint {tf_checkpoint_path}...""" ) load_tfa_weights_in_bert(lowercase__ , lowercase__ , lowercase__ ) # Save pytorch-model logger.info(f"""Saving PyTorch model to {pytorch_dump_path}...""" ) torch.save(model.state_dict() , lowercase__ ) if __name__ == "__main__": __UpperCAmelCase = argparse.ArgumentParser() parser.add_argument( '--tf_checkpoint_path', type=str, required=True, help='Path to the TensorFlow 2.x checkpoint path.' ) parser.add_argument( '--bert_config_file', type=str, required=True, help='The config json file corresponding to the BERT model. This specifies the model architecture.', ) parser.add_argument( '--pytorch_dump_path', type=str, required=True, help='Path to the output PyTorch model (must include filename).', ) __UpperCAmelCase = parser.parse_args() convert_tfa_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
84
"""simple docstring""" import tempfile import numpy as np import torch from transformers import AutoTokenizer, TaEncoderModel from diffusers import DDPMScheduler, UNetaDConditionModel from diffusers.models.attention_processor import AttnAddedKVProcessor from diffusers.pipelines.deepfloyd_if import IFWatermarker from diffusers.utils.testing_utils import torch_device from ..test_pipelines_common import to_np class _SCREAMING_SNAKE_CASE : def __lowerCAmelCase ( self ) -> Tuple: torch.manual_seed(0 ) lowerCAmelCase_ :int = TaEncoderModel.from_pretrained("""hf-internal-testing/tiny-random-t5""" ) torch.manual_seed(0 ) lowerCAmelCase_ :Optional[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-t5""" ) torch.manual_seed(0 ) lowerCAmelCase_ :List[Any] = UNetaDConditionModel( sample_size=32 , layers_per_block=1 , block_out_channels=[32, 64] , down_block_types=[ """ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D""", ] , mid_block_type="""UNetMidBlock2DSimpleCrossAttn""" , up_block_types=["""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""] , in_channels=3 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="""text""" , addition_embed_type_num_heads=2 , cross_attention_norm="""group_norm""" , resnet_time_scale_shift="""scale_shift""" , act_fn="""gelu""" , ) unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests torch.manual_seed(0 ) lowerCAmelCase_ :str = DDPMScheduler( num_train_timesteps=1000 , beta_schedule="""squaredcos_cap_v2""" , beta_start=0.0_0_0_1 , beta_end=0.0_2 , thresholding=__A , dynamic_thresholding_ratio=0.9_5 , sample_max_value=1.0 , prediction_type="""epsilon""" , variance_type="""learned_range""" , ) torch.manual_seed(0 ) lowerCAmelCase_ :int = IFWatermarker() return { "text_encoder": text_encoder, "tokenizer": tokenizer, "unet": unet, "scheduler": scheduler, "watermarker": watermarker, "safety_checker": None, "feature_extractor": None, } def __lowerCAmelCase ( self ) -> List[str]: torch.manual_seed(0 ) lowerCAmelCase_ :Dict = TaEncoderModel.from_pretrained("""hf-internal-testing/tiny-random-t5""" ) torch.manual_seed(0 ) lowerCAmelCase_ :Dict = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-t5""" ) torch.manual_seed(0 ) lowerCAmelCase_ :Optional[Any] = UNetaDConditionModel( sample_size=32 , layers_per_block=[1, 2] , block_out_channels=[32, 64] , down_block_types=[ """ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D""", ] , mid_block_type="""UNetMidBlock2DSimpleCrossAttn""" , up_block_types=["""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""] , in_channels=6 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="""text""" , addition_embed_type_num_heads=2 , cross_attention_norm="""group_norm""" , resnet_time_scale_shift="""scale_shift""" , act_fn="""gelu""" , class_embed_type="""timestep""" , mid_block_scale_factor=1.4_1_4 , time_embedding_act_fn="""gelu""" , time_embedding_dim=32 , ) unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests torch.manual_seed(0 ) lowerCAmelCase_ :str = DDPMScheduler( num_train_timesteps=1000 , beta_schedule="""squaredcos_cap_v2""" , beta_start=0.0_0_0_1 , beta_end=0.0_2 , thresholding=__A , dynamic_thresholding_ratio=0.9_5 , sample_max_value=1.0 , prediction_type="""epsilon""" , variance_type="""learned_range""" , ) torch.manual_seed(0 ) lowerCAmelCase_ :Optional[int] = DDPMScheduler( num_train_timesteps=1000 , beta_schedule="""squaredcos_cap_v2""" , beta_start=0.0_0_0_1 , beta_end=0.0_2 , ) torch.manual_seed(0 ) lowerCAmelCase_ :Dict = IFWatermarker() return { "text_encoder": text_encoder, "tokenizer": tokenizer, "unet": unet, "scheduler": scheduler, "image_noising_scheduler": image_noising_scheduler, "watermarker": watermarker, "safety_checker": None, "feature_extractor": None, } def __lowerCAmelCase ( self ) -> Dict: lowerCAmelCase_ :Dict = self.get_dummy_components() lowerCAmelCase_ :Tuple = self.pipeline_class(**__A ) pipe.to(__A ) pipe.set_progress_bar_config(disable=__A ) lowerCAmelCase_ :Any = self.get_dummy_inputs(__A ) lowerCAmelCase_ :Optional[int] = inputs["""prompt"""] lowerCAmelCase_ :Optional[int] = inputs["""generator"""] lowerCAmelCase_ :Any = inputs["""num_inference_steps"""] lowerCAmelCase_ :Optional[int] = inputs["""output_type"""] if "image" in inputs: lowerCAmelCase_ :List[Any] = inputs["""image"""] else: lowerCAmelCase_ :int = None if "mask_image" in inputs: lowerCAmelCase_ :List[Any] = inputs["""mask_image"""] else: lowerCAmelCase_ :int = None if "original_image" in inputs: lowerCAmelCase_ :List[Any] = inputs["""original_image"""] else: lowerCAmelCase_ :List[Any] = None lowerCAmelCase_ , lowerCAmelCase_ :int = pipe.encode_prompt(__A ) # inputs with prompt converted to embeddings lowerCAmelCase_ :List[str] = { """prompt_embeds""": prompt_embeds, """negative_prompt_embeds""": negative_prompt_embeds, """generator""": generator, """num_inference_steps""": num_inference_steps, """output_type""": output_type, } if image is not None: lowerCAmelCase_ :int = image if mask_image is not None: lowerCAmelCase_ :Tuple = mask_image if original_image is not None: lowerCAmelCase_ :Optional[Any] = original_image # set all optional components to None for optional_component in pipe._optional_components: setattr(__A , __A , __A ) lowerCAmelCase_ :Optional[int] = pipe(**__A )[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(__A ) lowerCAmelCase_ :Optional[int] = self.pipeline_class.from_pretrained(__A ) pipe_loaded.to(__A ) pipe_loaded.set_progress_bar_config(disable=__A ) pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests for optional_component in pipe._optional_components: self.assertTrue( getattr(__A , __A ) is None , f"""`{optional_component}` did not stay set to None after loading.""" , ) lowerCAmelCase_ :Dict = self.get_dummy_inputs(__A ) lowerCAmelCase_ :Union[str, Any] = inputs["""generator"""] lowerCAmelCase_ :Any = inputs["""num_inference_steps"""] lowerCAmelCase_ :Tuple = inputs["""output_type"""] # inputs with prompt converted to embeddings lowerCAmelCase_ :Tuple = { """prompt_embeds""": prompt_embeds, """negative_prompt_embeds""": negative_prompt_embeds, """generator""": generator, """num_inference_steps""": num_inference_steps, """output_type""": output_type, } if image is not None: lowerCAmelCase_ :Optional[int] = image if mask_image is not None: lowerCAmelCase_ :str = mask_image if original_image is not None: lowerCAmelCase_ :Tuple = original_image lowerCAmelCase_ :Union[str, Any] = pipe_loaded(**__A )[0] lowerCAmelCase_ :Dict = np.abs(to_np(__A ) - to_np(__A ) ).max() self.assertLess(__A , 1E-4 ) def __lowerCAmelCase ( self ) -> List[str]: lowerCAmelCase_ :Any = self.get_dummy_components() lowerCAmelCase_ :Optional[int] = self.pipeline_class(**__A ) pipe.to(__A ) pipe.set_progress_bar_config(disable=__A ) lowerCAmelCase_ :Optional[int] = self.get_dummy_inputs(__A ) lowerCAmelCase_ :Dict = pipe(**__A )[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(__A ) lowerCAmelCase_ :Any = self.pipeline_class.from_pretrained(__A ) pipe_loaded.to(__A ) pipe_loaded.set_progress_bar_config(disable=__A ) pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests lowerCAmelCase_ :List[Any] = self.get_dummy_inputs(__A ) lowerCAmelCase_ :str = pipe_loaded(**__A )[0] lowerCAmelCase_ :Dict = np.abs(to_np(__A ) - to_np(__A ) ).max() self.assertLess(__A , 1E-4 )
84
1
'''simple docstring''' def lowercase_ ( lowerCAmelCase__ : Optional[int] = 600851475143 ): """simple docstring""" try: __UpperCAmelCase : Tuple = int(a_ ) except (TypeError, ValueError): raise TypeError("""Parameter n must be int or castable to int.""" ) if n <= 0: raise ValueError("""Parameter n must be greater than or equal to one.""" ) __UpperCAmelCase : Union[str, Any] = 2 __UpperCAmelCase : List[str] = 0 if n == 2: return 2 while n > 2: while n % i != 0: i += 1 __UpperCAmelCase : List[Any] = i while n % i == 0: __UpperCAmelCase : Union[str, Any] = n // i i += 1 return int(a_ ) if __name__ == "__main__": print(F'{solution() = }')
364
'''simple docstring''' import gc import unittest from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline from transformers.pipelines import PipelineException from transformers.testing_utils import ( is_pipeline_test, is_torch_available, nested_simplify, require_tf, require_torch, require_torch_gpu, slow, ) from .test_pipelines_common import ANY @is_pipeline_test class _A ( unittest.TestCase ): _SCREAMING_SNAKE_CASE : Optional[Any] = MODEL_FOR_MASKED_LM_MAPPING _SCREAMING_SNAKE_CASE : Tuple = TF_MODEL_FOR_MASKED_LM_MAPPING def __A ( self ) -> Any: '''simple docstring''' super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() if is_torch_available(): import torch torch.cuda.empty_cache() @require_tf def __A ( self ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase : List[str] = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , top_k=2 , framework="""tf""" ) __UpperCAmelCase : Union[str, Any] = unmasker("""My name is <mask>""" ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=6 ) , [ {"""sequence""": """My name is grouped""", """score""": 2.1E-05, """token""": 38_015, """token_str""": """ grouped"""}, {"""sequence""": """My name is accuser""", """score""": 2.1E-05, """token""": 25_506, """token_str""": """ accuser"""}, ] , ) __UpperCAmelCase : List[str] = unmasker("""The largest city in France is <mask>""" ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=6 ) , [ { """sequence""": """The largest city in France is grouped""", """score""": 2.1E-05, """token""": 38_015, """token_str""": """ grouped""", }, { """sequence""": """The largest city in France is accuser""", """score""": 2.1E-05, """token""": 25_506, """token_str""": """ accuser""", }, ] , ) __UpperCAmelCase : Union[str, Any] = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=6 ) , [ {"""sequence""": """My name is Clara""", """score""": 2E-05, """token""": 13_606, """token_str""": """ Clara"""}, {"""sequence""": """My name is Patrick""", """score""": 2E-05, """token""": 3_499, """token_str""": """ Patrick"""}, {"""sequence""": """My name is Te""", """score""": 1.9E-05, """token""": 2_941, """token_str""": """ Te"""}, ] , ) @require_torch def __A ( self ) -> Dict: '''simple docstring''' __UpperCAmelCase : Dict = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , top_k=2 , framework="""pt""" ) __UpperCAmelCase : Union[str, Any] = unmasker("""My name is <mask>""" ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=6 ) , [ {"""sequence""": """My name is Maul""", """score""": 2.2E-05, """token""": 35_676, """token_str""": """ Maul"""}, {"""sequence""": """My name isELS""", """score""": 2.2E-05, """token""": 16_416, """token_str""": """ELS"""}, ] , ) __UpperCAmelCase : Dict = unmasker("""The largest city in France is <mask>""" ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=6 ) , [ { """sequence""": """The largest city in France is Maul""", """score""": 2.2E-05, """token""": 35_676, """token_str""": """ Maul""", }, {"""sequence""": """The largest city in France isELS""", """score""": 2.2E-05, """token""": 16_416, """token_str""": """ELS"""}, ] , ) __UpperCAmelCase : str = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=6 ) , [ {"""sequence""": """My name is Patrick""", """score""": 2.1E-05, """token""": 3_499, """token_str""": """ Patrick"""}, {"""sequence""": """My name is Te""", """score""": 2E-05, """token""": 2_941, """token_str""": """ Te"""}, {"""sequence""": """My name is Clara""", """score""": 2E-05, """token""": 13_606, """token_str""": """ Clara"""}, ] , ) __UpperCAmelCase : Optional[int] = unmasker("""My name is <mask> <mask>""" , top_k=2 ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=6 ) , [ [ { """score""": 2.2E-05, """token""": 35_676, """token_str""": """ Maul""", """sequence""": """<s>My name is Maul<mask></s>""", }, {"""score""": 2.2E-05, """token""": 16_416, """token_str""": """ELS""", """sequence""": """<s>My name isELS<mask></s>"""}, ], [ { """score""": 2.2E-05, """token""": 35_676, """token_str""": """ Maul""", """sequence""": """<s>My name is<mask> Maul</s>""", }, {"""score""": 2.2E-05, """token""": 16_416, """token_str""": """ELS""", """sequence""": """<s>My name is<mask>ELS</s>"""}, ], ] , ) @require_torch_gpu def __A ( self ) -> List[Any]: '''simple docstring''' __UpperCAmelCase : List[str] = pipeline("""fill-mask""" , model="""hf-internal-testing/tiny-random-distilbert""" , device=0 , framework="""pt""" ) # convert model to fp16 pipe.model.half() __UpperCAmelCase : str = pipe("""Paris is the [MASK] of France.""" ) # We actually don't care about the result, we just want to make sure # it works, meaning the float16 tensor got casted back to float32 # for postprocessing. self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase ) @slow @require_torch def __A ( self ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase : Any = pipeline(task="""fill-mask""" , model="""distilroberta-base""" , top_k=2 , framework="""pt""" ) self.run_large_test(__UpperCAmelCase ) @slow @require_tf def __A ( self ) -> int: '''simple docstring''' __UpperCAmelCase : int = pipeline(task="""fill-mask""" , model="""distilroberta-base""" , top_k=2 , framework="""tf""" ) self.run_large_test(__UpperCAmelCase ) def __A ( self , __UpperCAmelCase ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase : Union[str, Any] = unmasker("""My name is <mask>""" ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , [ {"""sequence""": """My name is John""", """score""": 0.008, """token""": 610, """token_str""": """ John"""}, {"""sequence""": """My name is Chris""", """score""": 0.007, """token""": 1_573, """token_str""": """ Chris"""}, ] , ) __UpperCAmelCase : Optional[int] = unmasker("""The largest city in France is <mask>""" ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , [ { """sequence""": """The largest city in France is Paris""", """score""": 0.251, """token""": 2_201, """token_str""": """ Paris""", }, { """sequence""": """The largest city in France is Lyon""", """score""": 0.214, """token""": 12_790, """token_str""": """ Lyon""", }, ] , ) __UpperCAmelCase : Optional[int] = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , [ {"""sequence""": """My name is Patrick""", """score""": 0.005, """token""": 3_499, """token_str""": """ Patrick"""}, {"""sequence""": """My name is Clara""", """score""": 0.000, """token""": 13_606, """token_str""": """ Clara"""}, {"""sequence""": """My name is Te""", """score""": 0.000, """token""": 2_941, """token_str""": """ Te"""}, ] , ) @require_torch def __A ( self ) -> List[str]: '''simple docstring''' __UpperCAmelCase : Dict = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , framework="""pt""" ) __UpperCAmelCase : Tuple = None __UpperCAmelCase : int = None self.run_pipeline_test(__UpperCAmelCase , [] ) @require_tf def __A ( self ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : Dict = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , framework="""tf""" ) __UpperCAmelCase : Optional[int] = None __UpperCAmelCase : str = None self.run_pipeline_test(__UpperCAmelCase , [] ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Any: '''simple docstring''' if tokenizer is None or tokenizer.mask_token_id is None: self.skipTest("""The provided tokenizer has no mask token, (probably reformer or wav2vec2)""" ) __UpperCAmelCase : str = FillMaskPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase ) __UpperCAmelCase : int = [ f'This is another {tokenizer.mask_token} test', ] return fill_masker, examples def __A ( self , __UpperCAmelCase , __UpperCAmelCase ) -> List[Any]: '''simple docstring''' __UpperCAmelCase : Optional[int] = fill_masker.tokenizer __UpperCAmelCase : Union[str, Any] = fill_masker.model __UpperCAmelCase : Tuple = fill_masker( f'This is a {tokenizer.mask_token}' , ) self.assertEqual( __UpperCAmelCase , [ {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, ] , ) __UpperCAmelCase : int = fill_masker([f'This is a {tokenizer.mask_token}'] ) self.assertEqual( __UpperCAmelCase , [ {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, ] , ) __UpperCAmelCase : Union[str, Any] = fill_masker([f'This is a {tokenizer.mask_token}', f'Another {tokenizer.mask_token} great test.'] ) self.assertEqual( __UpperCAmelCase , [ [ {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, ], [ {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, ], ] , ) with self.assertRaises(__UpperCAmelCase ): fill_masker([None] ) # No mask_token is not supported with self.assertRaises(__UpperCAmelCase ): fill_masker("""This is""" ) self.run_test_top_k(__UpperCAmelCase , __UpperCAmelCase ) self.run_test_targets(__UpperCAmelCase , __UpperCAmelCase ) self.run_test_top_k_targets(__UpperCAmelCase , __UpperCAmelCase ) self.fill_mask_with_duplicate_targets_and_top_k(__UpperCAmelCase , __UpperCAmelCase ) self.fill_mask_with_multiple_masks(__UpperCAmelCase , __UpperCAmelCase ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase ) -> Any: '''simple docstring''' __UpperCAmelCase : Dict = tokenizer.get_vocab() __UpperCAmelCase : Dict = sorted(vocab.keys() )[:2] # Pipeline argument __UpperCAmelCase : Dict = FillMaskPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase , targets=__UpperCAmelCase ) __UpperCAmelCase : List[str] = fill_masker(f'This is a {tokenizer.mask_token}' ) self.assertEqual( __UpperCAmelCase , [ {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, ] , ) __UpperCAmelCase : Any = {vocab[el] for el in targets} self.assertEqual({el["""token"""] for el in outputs} , __UpperCAmelCase ) __UpperCAmelCase : int = [tokenizer.decode([x] ) for x in target_ids] self.assertEqual({el["""token_str"""] for el in outputs} , set(__UpperCAmelCase ) ) # Call argument __UpperCAmelCase : List[Any] = FillMaskPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase ) __UpperCAmelCase : Tuple = fill_masker(f'This is a {tokenizer.mask_token}' , targets=__UpperCAmelCase ) self.assertEqual( __UpperCAmelCase , [ {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, ] , ) __UpperCAmelCase : List[Any] = {vocab[el] for el in targets} self.assertEqual({el["""token"""] for el in outputs} , __UpperCAmelCase ) __UpperCAmelCase : List[Any] = [tokenizer.decode([x] ) for x in target_ids] self.assertEqual({el["""token_str"""] for el in outputs} , set(__UpperCAmelCase ) ) # Score equivalence __UpperCAmelCase : Dict = fill_masker(f'This is a {tokenizer.mask_token}' , targets=__UpperCAmelCase ) __UpperCAmelCase : Dict = [top_mask["""token_str"""] for top_mask in outputs] __UpperCAmelCase : str = [top_mask["""score"""] for top_mask in outputs] # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`. if set(__UpperCAmelCase ) == set(__UpperCAmelCase ): __UpperCAmelCase : str = fill_masker(f'This is a {tokenizer.mask_token}' , targets=__UpperCAmelCase ) __UpperCAmelCase : int = [top_mask["""score"""] for top_mask in unmasked_targets] self.assertEqual(nested_simplify(__UpperCAmelCase ) , nested_simplify(__UpperCAmelCase ) ) # Raises with invalid with self.assertRaises(__UpperCAmelCase ): __UpperCAmelCase : Any = fill_masker(f'This is a {tokenizer.mask_token}' , targets=[] ) # For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised if "" not in tokenizer.get_vocab(): with self.assertRaises(__UpperCAmelCase ): __UpperCAmelCase : Dict = fill_masker(f'This is a {tokenizer.mask_token}' , targets=[""""""] ) with self.assertRaises(__UpperCAmelCase ): __UpperCAmelCase : Union[str, Any] = fill_masker(f'This is a {tokenizer.mask_token}' , targets="""""" ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase ) -> Tuple: '''simple docstring''' __UpperCAmelCase : Dict = FillMaskPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase , top_k=2 ) __UpperCAmelCase : Optional[int] = fill_masker(f'This is a {tokenizer.mask_token}' ) self.assertEqual( __UpperCAmelCase , [ {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, ] , ) __UpperCAmelCase : List[Any] = FillMaskPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase ) __UpperCAmelCase : int = fill_masker(f'This is a {tokenizer.mask_token}' , top_k=2 ) self.assertEqual( __UpperCAmelCase , [ {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, ] , ) self.assertEqual(nested_simplify(__UpperCAmelCase ) , nested_simplify(__UpperCAmelCase ) ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase ) -> Dict: '''simple docstring''' __UpperCAmelCase : int = tokenizer.get_vocab() __UpperCAmelCase : List[Any] = FillMaskPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase ) # top_k=2, ntargets=3 __UpperCAmelCase : Dict = sorted(vocab.keys() )[:3] __UpperCAmelCase : str = fill_masker(f'This is a {tokenizer.mask_token}' , top_k=2 , targets=__UpperCAmelCase ) # If we use the most probably targets, and filter differently, we should still # have the same results __UpperCAmelCase : Tuple = [el["""token_str"""] for el in sorted(__UpperCAmelCase , key=lambda __UpperCAmelCase : x["score"] , reverse=__UpperCAmelCase )] # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`. if set(__UpperCAmelCase ).issubset(__UpperCAmelCase ): __UpperCAmelCase : Union[str, Any] = fill_masker(f'This is a {tokenizer.mask_token}' , top_k=3 , targets=__UpperCAmelCase ) # They should yield exactly the same result self.assertEqual(nested_simplify(__UpperCAmelCase ) , nested_simplify(__UpperCAmelCase ) ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : Union[str, Any] = FillMaskPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase ) __UpperCAmelCase : List[Any] = tokenizer.get_vocab() # String duplicates + id duplicates __UpperCAmelCase : Dict = sorted(vocab.keys() )[:3] __UpperCAmelCase : Dict = [targets[0], targets[1], targets[0], targets[2], targets[1]] __UpperCAmelCase : Optional[int] = fill_masker(f'My name is {tokenizer.mask_token}' , targets=__UpperCAmelCase , top_k=10 ) # The target list contains duplicates, so we can't output more # than them self.assertEqual(len(__UpperCAmelCase ) , 3 ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : List[str] = FillMaskPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase ) __UpperCAmelCase : Dict = fill_masker( f'This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}' , top_k=2 ) self.assertEqual( __UpperCAmelCase , [ [ {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, ], [ {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, ], [ {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, {"""sequence""": ANY(__UpperCAmelCase ), """score""": ANY(__UpperCAmelCase ), """token""": ANY(__UpperCAmelCase ), """token_str""": ANY(__UpperCAmelCase )}, ], ] , )
16
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowercase__ : Union[str, Any] = { '''configuration_jukebox''': [ '''JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''JukeboxConfig''', '''JukeboxPriorConfig''', '''JukeboxVQVAEConfig''', ], '''tokenization_jukebox''': ['''JukeboxTokenizer'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase__ : Union[str, Any] = [ '''JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST''', '''JukeboxModel''', '''JukeboxPreTrainedModel''', '''JukeboxVQVAE''', '''JukeboxPrior''', ] if TYPE_CHECKING: from .configuration_jukebox import ( JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP, JukeboxConfig, JukeboxPriorConfig, JukeboxVQVAEConfig, ) from .tokenization_jukebox import JukeboxTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_jukebox import ( JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST, JukeboxModel, JukeboxPreTrainedModel, JukeboxPrior, JukeboxVQVAE, ) else: import sys lowercase__ : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
264
"""simple docstring""" import numpy as np import torch from torch.utils.data import Dataset from utils import logger class _UpperCAmelCase ( lowerCAmelCase__): def __init__( self : Optional[int] , lowercase_ : str , lowercase_ : int ): snake_case_ : Dict = params snake_case_ : Union[str, Any] = np.array(lowercase_ ) snake_case_ : str = np.array([len(lowercase_ ) for t in data] ) self.check() self.remove_long_sequences() self.remove_empty_sequences() self.remove_unknown_sequences() self.check() self.print_statistics() def __getitem__( self : Dict , lowercase_ : Union[str, Any] ): return (self.token_ids[index], self.lengths[index]) def __len__( self : List[Any] ): return len(self.lengths ) def _snake_case ( self : Tuple ): assert len(self.token_ids ) == len(self.lengths ) assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) ) def _snake_case ( self : Tuple ): snake_case_ : str = self.params.max_model_input_size snake_case_ : Dict = self.lengths > max_len logger.info(f"Splitting {sum(lowercase_ )} too long sequences." ) def divide_chunks(lowercase_ : Tuple , lowercase_ : Optional[Any] ): return [l[i : i + n] for i in range(0 , len(lowercase_ ) , lowercase_ )] snake_case_ : Tuple = [] snake_case_ : Any = [] if self.params.mlm: snake_case_, snake_case_ : Union[str, Any] = self.params.special_tok_ids['''cls_token'''], self.params.special_tok_ids['''sep_token'''] else: snake_case_, snake_case_ : Dict = self.params.special_tok_ids['''bos_token'''], self.params.special_tok_ids['''eos_token'''] for seq_, len_ in zip(self.token_ids , self.lengths ): assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_ if len_ <= max_len: new_tok_ids.append(seq_ ) new_lengths.append(len_ ) else: snake_case_ : Any = [] for sub_s in divide_chunks(seq_ , max_len - 2 ): if sub_s[0] != cls_id: snake_case_ : Dict = np.insert(lowercase_ , 0 , lowercase_ ) if sub_s[-1] != sep_id: snake_case_ : Tuple = np.insert(lowercase_ , len(lowercase_ ) , lowercase_ ) assert len(lowercase_ ) <= max_len assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s sub_seqs.append(lowercase_ ) new_tok_ids.extend(lowercase_ ) new_lengths.extend([len(lowercase_ ) for l in sub_seqs] ) snake_case_ : List[str] = np.array(lowercase_ ) snake_case_ : Optional[Any] = np.array(lowercase_ ) def _snake_case ( self : Optional[int] ): snake_case_ : List[Any] = len(self ) snake_case_ : List[str] = self.lengths > 11 snake_case_ : Dict = self.token_ids[indices] snake_case_ : Dict = self.lengths[indices] snake_case_ : str = len(self ) logger.info(f"Remove {init_size - new_size} too short (<=11 tokens) sequences." ) def _snake_case ( self : Tuple ): if "unk_token" not in self.params.special_tok_ids: return else: snake_case_ : str = self.params.special_tok_ids['''unk_token'''] snake_case_ : str = len(self ) snake_case_ : int = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] ) snake_case_ : str = (unk_occs / self.lengths) < 0.5 snake_case_ : Optional[Any] = self.token_ids[indices] snake_case_ : Optional[int] = self.lengths[indices] snake_case_ : Dict = len(self ) logger.info(f"Remove {init_size - new_size} sequences with a high level of unknown tokens (50%)." ) def _snake_case ( self : Dict ): if not self.params.is_master: return logger.info(f"{len(self )} sequences" ) # data_len = sum(self.lengths) # nb_unique_tokens = len(Counter(list(chain(*self.token_ids)))) # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)') # unk_idx = self.params.special_tok_ids['unk_token'] # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids]) # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)') def _snake_case ( self : List[str] , lowercase_ : Dict ): snake_case_ : Optional[int] = [t[0] for t in batch] snake_case_ : str = [t[1] for t in batch] assert len(lowercase_ ) == len(lowercase_ ) # Max for paddings snake_case_ : str = max(lowercase_ ) # Pad token ids if self.params.mlm: snake_case_ : Tuple = self.params.special_tok_ids['''pad_token'''] else: snake_case_ : Dict = self.params.special_tok_ids['''unk_token'''] snake_case_ : Any = [list(t.astype(lowercase_ ) ) + [pad_idx] * (max_seq_len_ - len(lowercase_ )) for t in token_ids] assert len(tk_ ) == len(lowercase_ ) assert all(len(lowercase_ ) == max_seq_len_ for t in tk_ ) snake_case_ : str = torch.tensor(tk_ ) # (bs, max_seq_len_) snake_case_ : Optional[int] = torch.tensor(lowercase_ ) # (bs) return tk_t, lg_t
264
1
"""simple docstring""" from __future__ import annotations from collections.abc import Generator def a__ ( ) -> Generator[int, None, None]: lowerCamelCase = {} lowerCamelCase = 2 while True: lowerCamelCase = factor_map.pop(snake_case__ , snake_case__ ) if factor: lowerCamelCase = factor + prime while x in factor_map: x += factor lowerCamelCase = factor else: lowerCamelCase = prime yield prime prime += 1 def a__ ( snake_case__ : Optional[int] = 1E10 ) -> int: lowerCamelCase = sieve() lowerCamelCase = 1 while True: lowerCamelCase = next(snake_case__ ) if (2 * prime * n) > limit: return n # Ignore the next prime as the reminder will be 2. next(snake_case__ ) n += 2 if __name__ == "__main__": print(solution())
353
"""simple docstring""" import math import time from transformers import Trainer, is_torch_tpu_available from transformers.trainer_utils import PredictionOutput, speed_metrics if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm import torch_xla.debug.metrics as met class __magic_name__ ( UpperCAmelCase__ ): '''simple docstring''' def __init__( self , *_a , _a=None , _a=None , **_a ): """simple docstring""" super().__init__(*_a , **_a ) lowerCamelCase = eval_examples lowerCamelCase = post_process_function def _lowerCAmelCase ( self , _a=None , _a=None , _a=None , _a = "eval" ): """simple docstring""" lowerCamelCase = self.eval_dataset if eval_dataset is None else eval_dataset lowerCamelCase = self.get_eval_dataloader(_a ) lowerCamelCase = self.eval_examples if eval_examples is None else eval_examples # Temporarily disable metric computation, we will do it in the loop here. lowerCamelCase = self.compute_metrics lowerCamelCase = None lowerCamelCase = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop lowerCamelCase = time.time() try: lowerCamelCase = eval_loop( _a , description="""Evaluation""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_a , metric_key_prefix=_a , ) finally: lowerCamelCase = compute_metrics lowerCamelCase = self.args.eval_batch_size * self.args.world_size if f'{metric_key_prefix}_jit_compilation_time' in output.metrics: start_time += output.metrics[f'{metric_key_prefix}_jit_compilation_time'] output.metrics.update( speed_metrics( _a , _a , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) ) if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save: # Only the main node write the results by default lowerCamelCase = self.post_process_function(_a , _a , output.predictions ) lowerCamelCase = self.compute_metrics(_a ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(f'{metric_key_prefix}_' ): lowerCamelCase = metrics.pop(_a ) metrics.update(output.metrics ) else: lowerCamelCase = output.metrics if self.args.should_log: # Only the main node log the results by default self.log(_a ) if self.args.tpu_metrics_debug or self.args.debug: # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) xm.master_print(met.metrics_report() ) lowerCamelCase = self.callback_handler.on_evaluate(self.args , self.state , self.control , _a ) return metrics def _lowerCAmelCase ( self , _a , _a , _a=None , _a = "test" ): """simple docstring""" lowerCamelCase = self.get_test_dataloader(_a ) # Temporarily disable metric computation, we will do it in the loop here. lowerCamelCase = self.compute_metrics lowerCamelCase = None lowerCamelCase = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop lowerCamelCase = time.time() try: lowerCamelCase = eval_loop( _a , description="""Prediction""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_a , metric_key_prefix=_a , ) finally: lowerCamelCase = compute_metrics lowerCamelCase = self.args.eval_batch_size * self.args.world_size if f'{metric_key_prefix}_jit_compilation_time' in output.metrics: start_time += output.metrics[f'{metric_key_prefix}_jit_compilation_time'] output.metrics.update( speed_metrics( _a , _a , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) ) if self.post_process_function is None or self.compute_metrics is None: return output lowerCamelCase = self.post_process_function(_a , _a , output.predictions , """predict""" ) lowerCamelCase = self.compute_metrics(_a ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(f'{metric_key_prefix}_' ): lowerCamelCase = metrics.pop(_a ) metrics.update(output.metrics ) return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=_a )
168
0
"""simple docstring""" class __snake_case : def __init__( self : Dict , __lowerCAmelCase : int ): """simple docstring""" _lowerCamelCase : Tuple = size _lowerCamelCase : List[Any] = [0] * size _lowerCamelCase : Any = [0] * size @staticmethod def SCREAMING_SNAKE_CASE ( __lowerCAmelCase : int ): """simple docstring""" return index | (index + 1) @staticmethod def SCREAMING_SNAKE_CASE ( __lowerCAmelCase : int ): """simple docstring""" return (index & (index + 1)) - 1 def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __lowerCAmelCase : int , __lowerCAmelCase : int ): """simple docstring""" _lowerCamelCase : List[Any] = value while index < self.size: _lowerCamelCase : Optional[int] = self.get_prev(lowercase_ ) + 1 if current_left_border == index: _lowerCamelCase : List[Any] = value else: _lowerCamelCase : str = max(lowercase_ , lowercase_ , lowercase_ ) _lowerCamelCase : str = self.get_next(lowercase_ ) def SCREAMING_SNAKE_CASE ( self : List[Any] , __lowerCAmelCase : int , __lowerCAmelCase : int ): """simple docstring""" right -= 1 # Because of right is exclusive _lowerCamelCase : Any = 0 while left <= right: _lowerCamelCase : str = self.get_prev(lowercase_ ) if left <= current_left: _lowerCamelCase : Dict = max(lowercase_ , self.tree[right] ) _lowerCamelCase : Optional[Any] = current_left else: _lowerCamelCase : Union[str, Any] = max(lowercase_ , self.arr[right] ) right -= 1 return result if __name__ == "__main__": import doctest doctest.testmod()
72
from sklearn.metrics import mean_squared_error import datasets lowerCamelCase = '\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n' lowerCamelCase = '\\nMean Squared Error(MSE) is the average of the square of difference between the predicted\nand actual values.\n' lowerCamelCase = '\nArgs:\n predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Estimated target values.\n references: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Ground truth (correct) target values.\n sample_weight: array-like of shape (n_samples,), default=None\n Sample weights.\n multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"\n Defines aggregating of multiple output values. Array-like value defines weights used to average errors.\n\n "raw_values" : Returns a full set of errors in case of multioutput input.\n\n "uniform_average" : Errors of all outputs are averaged with uniform weight.\n\n squared : bool, default=True\n If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.\n\nReturns:\n mse : mean squared error.\nExamples:\n\n >>> mse_metric = datasets.load_metric("mse")\n >>> predictions = [2.5, 0.0, 2, 8]\n >>> references = [3, -0.5, 2, 7]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'mse\': 0.375}\n >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)\n >>> print(rmse_result)\n {\'mse\': 0.6123724356957945}\n\n If you\'re using multi-dimensional lists, then set the config as follows :\n\n >>> mse_metric = datasets.load_metric("mse", "multilist")\n >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]\n >>> references = [[0, 2], [-1, 2], [8, -5]]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'mse\': 0.7083333333333334}\n >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\')\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {\'mse\': array([0.41666667, 1. ])}\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class A ( datasets.Metric ): def lowerCamelCase ( self : List[Any] ) -> List[str]: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[ 'https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html' ] , ) def lowerCamelCase ( self : Tuple ) -> Optional[int]: """simple docstring""" if self.config_name == "multilist": return { "predictions": datasets.Sequence(datasets.Value('float' ) ), "references": datasets.Sequence(datasets.Value('float' ) ), } else: return { "predictions": datasets.Value('float' ), "references": datasets.Value('float' ), } def lowerCamelCase ( self : List[Any] , lowercase_ : List[Any] , lowercase_ : Tuple , lowercase_ : List[str]=None , lowercase_ : Union[str, Any]="uniform_average" , lowercase_ : Tuple=True ) -> Any: """simple docstring""" _lowerCamelCase : List[str] =mean_squared_error( lowercase_ , lowercase_ , sample_weight=lowercase_ , multioutput=lowercase_ , squared=lowercase_ ) return {"mse": mse}
199
0
from typing import List, Optional, Tuple, Union import torch from ...models import UNetaDModel from ...schedulers import ScoreSdeVeScheduler from ...utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class lowercase ( lowercase_ ): __SCREAMING_SNAKE_CASE : UNetaDModel __SCREAMING_SNAKE_CASE : ScoreSdeVeScheduler def __init__( self , snake_case , snake_case ): super().__init__() self.register_modules(unet=snake_case , scheduler=snake_case ) @torch.no_grad() def __call__( self , snake_case = 1 , snake_case = 2000 , snake_case = None , snake_case = "pil" , snake_case = True , **snake_case , ): snake_case_ = self.unet.config.sample_size snake_case_ = (batch_size, 3, img_size, img_size) snake_case_ = self.unet snake_case_ = randn_tensor(snake_case , generator=snake_case ) * self.scheduler.init_noise_sigma snake_case_ = sample.to(self.device ) self.scheduler.set_timesteps(snake_case ) self.scheduler.set_sigmas(snake_case ) for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ): snake_case_ = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device ) # correction step for _ in range(self.scheduler.config.correct_steps ): snake_case_ = self.unet(snake_case , snake_case ).sample snake_case_ = self.scheduler.step_correct(snake_case , snake_case , generator=snake_case ).prev_sample # prediction step snake_case_ = model(snake_case , snake_case ).sample snake_case_ = self.scheduler.step_pred(snake_case , snake_case , snake_case , generator=snake_case ) snake_case_ , snake_case_ = output.prev_sample, output.prev_sample_mean snake_case_ = sample_mean.clamp(0 , 1 ) snake_case_ = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": snake_case_ = self.numpy_to_pil(snake_case ) if not return_dict: return (sample,) return ImagePipelineOutput(images=snake_case )
200
from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline else: from .pipeline_unclip import UnCLIPPipeline from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline from .text_proj import UnCLIPTextProjModel
200
1
'''simple docstring''' from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow if is_tf_available(): import numpy as np import tensorflow as tf from transformers import TFCamembertModel @require_tf @require_sentencepiece @require_tokenizers class a_ ( unittest.TestCase ): '''simple docstring''' @slow def snake_case_( self ) -> str: _SCREAMING_SNAKE_CASE = TFCamembertModel.from_pretrained("""jplu/tf-camembert-base""" ) _SCREAMING_SNAKE_CASE = tf.convert_to_tensor( [[5, 121, 11, 660, 16, 730, 2_5543, 110, 83, 6]] , dtype=tf.intaa , ) # J'aime le camembert !" _SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE )["""last_hidden_state"""] _SCREAMING_SNAKE_CASE = tf.TensorShape((1, 10, 768) ) self.assertEqual(output.shape , __SCREAMING_SNAKE_CASE ) # compare the actual values for a slice. _SCREAMING_SNAKE_CASE = tf.convert_to_tensor( [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]] , dtype=tf.floataa , ) # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0') # camembert.eval() # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach() self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
58
import math import time from typing import Dict, List, Optional from torch.utils.data import Dataset from transformers import SeqaSeqTrainer, is_torch_tpu_available from transformers.trainer_utils import PredictionOutput, speed_metrics if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm import torch_xla.debug.metrics as met class _A ( __UpperCAmelCase ): def __init__( self : List[Any] , *__SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Any=None , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , **__SCREAMING_SNAKE_CASE : str): '''simple docstring''' super().__init__(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) __a = eval_examples __a = post_process_function def _lowerCamelCase ( self : Dict , __SCREAMING_SNAKE_CASE : Optional[Dataset] = None , __SCREAMING_SNAKE_CASE : List[Any]=None , __SCREAMING_SNAKE_CASE : Optional[List[str]] = None , __SCREAMING_SNAKE_CASE : str = "eval" , **__SCREAMING_SNAKE_CASE : Any , ): '''simple docstring''' __a = gen_kwargs.copy() __a = ( gen_kwargs['''max_length'''] if gen_kwargs.get('''max_length''') is not None else self.args.generation_max_length ) __a = ( gen_kwargs['''num_beams'''] if gen_kwargs.get('''num_beams''') is not None else self.args.generation_num_beams ) __a = gen_kwargs __a = self.eval_dataset if eval_dataset is None else eval_dataset __a = self.get_eval_dataloader(__SCREAMING_SNAKE_CASE) __a = self.eval_examples if eval_examples is None else eval_examples # Temporarily disable metric computation, we will do it in the loop here. __a = self.compute_metrics __a = None __a = time.time() __a = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: __a = eval_loop( __SCREAMING_SNAKE_CASE , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__SCREAMING_SNAKE_CASE , metric_key_prefix=__SCREAMING_SNAKE_CASE , ) finally: __a = compute_metrics __a = self.args.eval_batch_size * self.args.world_size if F'{metric_key_prefix}_jit_compilation_time' in output.metrics: start_time += output.metrics[F'{metric_key_prefix}_jit_compilation_time'] output.metrics.update( speed_metrics( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size) , )) if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save: # Only the main node write the results by default __a = self.post_process_function(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) __a = self.compute_metrics(__SCREAMING_SNAKE_CASE) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys()): if not key.startswith(F'{metric_key_prefix}_'): __a = metrics.pop(__SCREAMING_SNAKE_CASE) metrics.update(output.metrics) else: __a = output.metrics if self.args.should_log: # Only the main node log the results by default self.log(__SCREAMING_SNAKE_CASE) if self.args.tpu_metrics_debug or self.args.debug: # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) xm.master_print(met.metrics_report()) __a = self.callback_handler.on_evaluate(self.args , self.state , self.control , __SCREAMING_SNAKE_CASE) return metrics def _lowerCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Tuple=None , __SCREAMING_SNAKE_CASE : str = "test" , **__SCREAMING_SNAKE_CASE : Dict): '''simple docstring''' __a = gen_kwargs.copy() __a = self.get_test_dataloader(__SCREAMING_SNAKE_CASE) # Temporarily disable metric computation, we will do it in the loop here. __a = self.compute_metrics __a = None __a = time.time() __a = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: __a = eval_loop( __SCREAMING_SNAKE_CASE , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__SCREAMING_SNAKE_CASE , metric_key_prefix=__SCREAMING_SNAKE_CASE , ) finally: __a = compute_metrics __a = self.args.eval_batch_size * self.args.world_size if F'{metric_key_prefix}_jit_compilation_time' in output.metrics: start_time += output.metrics[F'{metric_key_prefix}_jit_compilation_time'] output.metrics.update( speed_metrics( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size) , )) if self.post_process_function is None or self.compute_metrics is None: return output __a = self.post_process_function(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , '''predict''') __a = self.compute_metrics(__SCREAMING_SNAKE_CASE) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys()): if not key.startswith(F'{metric_key_prefix}_'): __a = metrics.pop(__SCREAMING_SNAKE_CASE) metrics.update(output.metrics) return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=__SCREAMING_SNAKE_CASE)
49
0
"""simple docstring""" import argparse import torch from ...utils import logging from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert logging.set_verbosity_info() def a__ ( __lowercase , __lowercase , __lowercase ) -> Any: # Initialise PyTorch model _A = AlbertConfig.from_json_file(__lowercase ) print(f"""Building PyTorch model from configuration: {config}""" ) _A = AlbertForPreTraining(__lowercase ) # Load weights from tf checkpoint load_tf_weights_in_albert(__lowercase , __lowercase , __lowercase ) # Save pytorch-model print(f"""Save PyTorch model to {pytorch_dump_path}""" ) torch.save(model.state_dict() , __lowercase ) if __name__ == "__main__": a_ = argparse.ArgumentParser() # Required parameters parser.add_argument( "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path." ) parser.add_argument( "--albert_config_file", default=None, type=str, required=True, help=( "The config json file corresponding to the pre-trained ALBERT model. \n" "This specifies the model architecture." ), ) parser.add_argument( "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) a_ = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
163
"""simple docstring""" def a__ ( __lowercase , __lowercase ) -> float: _validate_point(__lowercase ) _validate_point(__lowercase ) if len(__lowercase ) != len(__lowercase ): raise ValueError("Both points must be in the same n-dimensional space" ) return float(sum(abs(a - b ) for a, b in zip(__lowercase , __lowercase ) ) ) def a__ ( __lowercase ) -> None: if point: if isinstance(__lowercase , __lowercase ): for item in point: if not isinstance(__lowercase , (int, float) ): _A = ( "Expected a list of numbers as input, found " f"""{type(__lowercase ).__name__}""" ) raise TypeError(__lowercase ) else: _A = f"""Expected a list of numbers as input, found {type(__lowercase ).__name__}""" raise TypeError(__lowercase ) else: raise ValueError("Missing an input" ) def a__ ( __lowercase , __lowercase ) -> float: _validate_point(__lowercase ) _validate_point(__lowercase ) if len(__lowercase ) != len(__lowercase ): raise ValueError("Both points must be in the same n-dimensional space" ) return float(sum(abs(x - y ) for x, y in zip(__lowercase , __lowercase ) ) ) if __name__ == "__main__": import doctest doctest.testmod()
163
1
'''simple docstring''' def _a( UpperCamelCase__ : int = 1_0_0_0 ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Optional[Any] =2**power SCREAMING_SNAKE_CASE__ : Optional[Any] =0 while n: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int =r + n % 1_0, n // 1_0 return r if __name__ == "__main__": print(solution(int(str(input()).strip())))
152
'''simple docstring''' import unittest import numpy as np from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING from transformers.pipelines import AudioClassificationPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_torchaudio, slow, ) from .test_pipelines_common import ANY @is_pipeline_test class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): snake_case_ = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING snake_case_ = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING def __magic_name__ ( self : str , __lowercase : str , __lowercase : List[str] , __lowercase : Union[str, Any] ) -> Dict: SCREAMING_SNAKE_CASE__ : Union[str, Any] =AudioClassificationPipeline(model=__lowercase , feature_extractor=__lowercase ) # test with a raw waveform SCREAMING_SNAKE_CASE__ : Optional[int] =np.zeros((3_40_00,) ) SCREAMING_SNAKE_CASE__ : str =np.zeros((1_40_00,) ) return audio_classifier, [audioa, audio] def __magic_name__ ( self : Optional[int] , __lowercase : int , __lowercase : Optional[int] ) -> str: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict =examples SCREAMING_SNAKE_CASE__ : str =audio_classifier(__lowercase ) # by default a model is initialized with num_labels=2 self.assertEqual( __lowercase , [ {'''score''': ANY(__lowercase ), '''label''': ANY(__lowercase )}, {'''score''': ANY(__lowercase ), '''label''': ANY(__lowercase )}, ] , ) SCREAMING_SNAKE_CASE__ : List[Any] =audio_classifier(__lowercase , top_k=1 ) self.assertEqual( __lowercase , [ {'''score''': ANY(__lowercase ), '''label''': ANY(__lowercase )}, ] , ) self.run_torchaudio(__lowercase ) @require_torchaudio def __magic_name__ ( self : Union[str, Any] , __lowercase : str ) -> Optional[Any]: import datasets # test with a local file SCREAMING_SNAKE_CASE__ : Optional[int] =datasets.load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' ) SCREAMING_SNAKE_CASE__ : int =dataset[0]['''audio''']['''array'''] SCREAMING_SNAKE_CASE__ : Optional[Any] =audio_classifier(__lowercase ) self.assertEqual( __lowercase , [ {'''score''': ANY(__lowercase ), '''label''': ANY(__lowercase )}, {'''score''': ANY(__lowercase ), '''label''': ANY(__lowercase )}, ] , ) @require_torch def __magic_name__ ( self : List[str] ) -> Dict: SCREAMING_SNAKE_CASE__ : Dict ='''anton-l/wav2vec2-random-tiny-classifier''' SCREAMING_SNAKE_CASE__ : Optional[Any] =pipeline('''audio-classification''' , model=__lowercase ) SCREAMING_SNAKE_CASE__ : str =np.ones((80_00,) ) SCREAMING_SNAKE_CASE__ : List[str] =audio_classifier(__lowercase , top_k=4 ) SCREAMING_SNAKE_CASE__ : Dict =[ {'''score''': 0.0842, '''label''': '''no'''}, {'''score''': 0.0838, '''label''': '''up'''}, {'''score''': 0.0837, '''label''': '''go'''}, {'''score''': 0.0834, '''label''': '''right'''}, ] SCREAMING_SNAKE_CASE__ : List[str] =[ {'''score''': 0.0845, '''label''': '''stop'''}, {'''score''': 0.0844, '''label''': '''on'''}, {'''score''': 0.0841, '''label''': '''right'''}, {'''score''': 0.0834, '''label''': '''left'''}, ] self.assertIn(nested_simplify(__lowercase , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] ) SCREAMING_SNAKE_CASE__ : List[str] ={'''array''': np.ones((80_00,) ), '''sampling_rate''': audio_classifier.feature_extractor.sampling_rate} SCREAMING_SNAKE_CASE__ : Tuple =audio_classifier(__lowercase , top_k=4 ) self.assertIn(nested_simplify(__lowercase , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] ) @require_torch @slow def __magic_name__ ( self : Dict ) -> Any: import datasets SCREAMING_SNAKE_CASE__ : Union[str, Any] ='''superb/wav2vec2-base-superb-ks''' SCREAMING_SNAKE_CASE__ : Optional[int] =pipeline('''audio-classification''' , model=__lowercase ) SCREAMING_SNAKE_CASE__ : Union[str, Any] =datasets.load_dataset('''anton-l/superb_dummy''' , '''ks''' , split='''test''' ) SCREAMING_SNAKE_CASE__ : List[str] =np.array(dataset[3]['''speech'''] , dtype=np.floataa ) SCREAMING_SNAKE_CASE__ : int =audio_classifier(__lowercase , top_k=4 ) self.assertEqual( nested_simplify(__lowercase , decimals=3 ) , [ {'''score''': 0.981, '''label''': '''go'''}, {'''score''': 0.007, '''label''': '''up'''}, {'''score''': 0.006, '''label''': '''_unknown_'''}, {'''score''': 0.001, '''label''': '''down'''}, ] , ) @require_tf @unittest.skip('''Audio classification is not implemented for TF''' ) def __magic_name__ ( self : List[str] ) -> Optional[int]: pass
152
1
'''simple docstring''' from typing import Dict import numpy as np from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException if is_tf_available(): import tensorflow as tf from ..tf_utils import stable_softmax if is_torch_available(): import torch lowercase__ : Optional[int] = logging.get_logger(__name__) @add_end_docstrings( a__ , r''' top_k (`int`, defaults to 5): The number of predictions to return. targets (`str` or `List[str]`, *optional*): When passed, the model will limit the scores to the passed targets instead of looking up in the whole vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting token will be used (with a warning, and that might be slower). ''' , ) class SCREAMING_SNAKE_CASE (a__ ): def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase): '''simple docstring''' if self.framework == "tf": __A : Tuple = tf.where(input_ids == self.tokenizer.mask_token_id).numpy() elif self.framework == "pt": __A : str = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=_UpperCAmelCase) else: raise ValueError('Unsupported framework') return masked_index def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase): '''simple docstring''' __A : Optional[Any] = self.get_masked_index(_UpperCAmelCase) __A : List[str] = np.prod(masked_index.shape) if numel < 1: raise PipelineException( 'fill-mask' , self.model.base_model_prefix , F'No mask_token ({self.tokenizer.mask_token}) found on the input' , ) def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase): '''simple docstring''' if isinstance(_UpperCAmelCase , _UpperCAmelCase): for model_input in model_inputs: self._ensure_exactly_one_mask_token(model_input['input_ids'][0]) else: for input_ids in model_inputs["input_ids"]: self._ensure_exactly_one_mask_token(_UpperCAmelCase) def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase=None , **_UpperCAmelCase): '''simple docstring''' if return_tensors is None: __A : List[Any] = self.framework __A : List[str] = self.tokenizer(_UpperCAmelCase , return_tensors=_UpperCAmelCase) self.ensure_exactly_one_mask_token(_UpperCAmelCase) return model_inputs def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase): '''simple docstring''' __A : Dict = self.model(**_UpperCAmelCase) __A : Optional[Any] = model_inputs['input_ids'] return model_outputs def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase=5 , _UpperCAmelCase=None): '''simple docstring''' if target_ids is not None and target_ids.shape[0] < top_k: __A : Any = target_ids.shape[0] __A : Optional[Any] = model_outputs['input_ids'][0] __A : Dict = model_outputs['logits'] if self.framework == "tf": __A : Tuple = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()[:, 0] __A : str = outputs.numpy() __A : Union[str, Any] = outputs[0, masked_index, :] __A : str = stable_softmax(_UpperCAmelCase , axis=-1) if target_ids is not None: __A : Tuple = tf.gather_nd(tf.squeeze(_UpperCAmelCase , 0) , target_ids.reshape(-1 , 1)) __A : Optional[int] = tf.expand_dims(_UpperCAmelCase , 0) __A : int = tf.math.top_k(_UpperCAmelCase , k=_UpperCAmelCase) __A ,__A : str = topk.values.numpy(), topk.indices.numpy() else: __A : Optional[Any] = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=_UpperCAmelCase).squeeze(-1) # Fill mask pipeline supports only one ${mask_token} per sample __A : Tuple = outputs[0, masked_index, :] __A : Dict = logits.softmax(dim=-1) if target_ids is not None: __A : Optional[Any] = probs[..., target_ids] __A ,__A : List[Any] = probs.topk(_UpperCAmelCase) __A : Dict = [] __A : Optional[int] = values.shape[0] == 1 for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist())): __A : Dict = [] for v, p in zip(_values , _predictions): # Copy is important since we're going to modify this array in place __A : Optional[int] = input_ids.numpy().copy() if target_ids is not None: __A : Optional[int] = target_ids[p].tolist() __A : List[Any] = p # Filter padding out: __A : Dict = tokens[np.where(tokens != self.tokenizer.pad_token_id)] # Originally we skip special tokens to give readable output. # For multi masks though, the other [MASK] would be removed otherwise # making the output look odd, so we add them back __A : Any = self.tokenizer.decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase) __A : str = {'score': v, 'token': p, 'token_str': self.tokenizer.decode([p]), 'sequence': sequence} row.append(_UpperCAmelCase) result.append(_UpperCAmelCase) if single_mask: return result[0] return result def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase=None): '''simple docstring''' if isinstance(_UpperCAmelCase , _UpperCAmelCase): __A : Dict = [targets] try: __A : Optional[Any] = self.tokenizer.get_vocab() except Exception: __A : str = {} __A : List[str] = [] for target in targets: __A : Any = vocab.get(_UpperCAmelCase , _UpperCAmelCase) if id_ is None: __A : str = self.tokenizer( _UpperCAmelCase , add_special_tokens=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , return_token_type_ids=_UpperCAmelCase , max_length=1 , truncation=_UpperCAmelCase , )['input_ids'] if len(_UpperCAmelCase) == 0: logger.warning( F'The specified target token `{target}` does not exist in the model vocabulary. ' 'We cannot replace it with anything meaningful, ignoring it') continue __A : Dict = input_ids[0] # XXX: If users encounter this pass # it becomes pretty slow, so let's make sure # The warning enables them to fix the input to # get faster performance. logger.warning( F'The specified target token `{target}` does not exist in the model vocabulary. ' F'Replacing with `{self.tokenizer.convert_ids_to_tokens(id_)}`.') target_ids.append(id_) __A : Dict = list(set(_UpperCAmelCase)) if len(_UpperCAmelCase) == 0: raise ValueError('At least one target must be provided when passed.') __A : List[str] = np.array(_UpperCAmelCase) return target_ids def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase=None , _UpperCAmelCase=None): '''simple docstring''' __A : Optional[int] = {} if targets is not None: __A : Union[str, Any] = self.get_target_ids(_UpperCAmelCase , _UpperCAmelCase) __A : Dict = target_ids if top_k is not None: __A : Optional[int] = top_k if self.tokenizer.mask_token_id is None: raise PipelineException( 'fill-mask' , self.model.base_model_prefix , 'The tokenizer does not define a `mask_token`.') return {}, {}, postprocess_params def __call__( self , _UpperCAmelCase , *_UpperCAmelCase , **_UpperCAmelCase): '''simple docstring''' __A : Dict = super().__call__(_UpperCAmelCase , **_UpperCAmelCase) if isinstance(_UpperCAmelCase , _UpperCAmelCase) and len(_UpperCAmelCase) == 1: return outputs[0] return outputs
190
'''simple docstring''' from math import pi, sqrt, tan def _lowerCAmelCase ( __snake_case : float ) -> float: if side_length < 0: raise ValueError('surface_area_cube() only accepts non-negative values' ) return 6 * side_length**2 def _lowerCAmelCase ( __snake_case : float , __snake_case : float , __snake_case : float ) -> float: if length < 0 or breadth < 0 or height < 0: raise ValueError('surface_area_cuboid() only accepts non-negative values' ) return 2 * ((length * breadth) + (breadth * height) + (length * height)) def _lowerCAmelCase ( __snake_case : float ) -> float: if radius < 0: raise ValueError('surface_area_sphere() only accepts non-negative values' ) return 4 * pi * radius**2 def _lowerCAmelCase ( __snake_case : float ) -> float: if radius < 0: raise ValueError('surface_area_hemisphere() only accepts non-negative values' ) return 3 * pi * radius**2 def _lowerCAmelCase ( __snake_case : float , __snake_case : float ) -> float: if radius < 0 or height < 0: raise ValueError('surface_area_cone() only accepts non-negative values' ) return pi * radius * (radius + (height**2 + radius**2) ** 0.5) def _lowerCAmelCase ( __snake_case : float , __snake_case : float , __snake_case : float ) -> float: if radius_a < 0 or radius_a < 0 or height < 0: raise ValueError( 'surface_area_conical_frustum() only accepts non-negative values' ) __A : Union[str, Any] = (height**2 + (radius_a - radius_a) ** 2) ** 0.5 return pi * ((slant_height * (radius_a + radius_a)) + radius_a**2 + radius_a**2) def _lowerCAmelCase ( __snake_case : float , __snake_case : float ) -> float: if radius < 0 or height < 0: raise ValueError('surface_area_cylinder() only accepts non-negative values' ) return 2 * pi * radius * (height + radius) def _lowerCAmelCase ( __snake_case : float , __snake_case : float ) -> float: if torus_radius < 0 or tube_radius < 0: raise ValueError('surface_area_torus() only accepts non-negative values' ) if torus_radius < tube_radius: raise ValueError( 'surface_area_torus() does not support spindle or self intersecting tori' ) return 4 * pow(__snake_case , 2 ) * torus_radius * tube_radius def _lowerCAmelCase ( __snake_case : float , __snake_case : float ) -> float: if length < 0 or width < 0: raise ValueError('area_rectangle() only accepts non-negative values' ) return length * width def _lowerCAmelCase ( __snake_case : float ) -> float: if side_length < 0: raise ValueError('area_square() only accepts non-negative values' ) return side_length**2 def _lowerCAmelCase ( __snake_case : float , __snake_case : float ) -> float: if base < 0 or height < 0: raise ValueError('area_triangle() only accepts non-negative values' ) return (base * height) / 2 def _lowerCAmelCase ( __snake_case : float , __snake_case : float , __snake_case : float ) -> float: if sidea < 0 or sidea < 0 or sidea < 0: raise ValueError('area_triangle_three_sides() only accepts non-negative values' ) elif sidea + sidea < sidea or sidea + sidea < sidea or sidea + sidea < sidea: raise ValueError('Given three sides do not form a triangle' ) __A : int = (sidea + sidea + sidea) / 2 __A : Tuple = sqrt( semi_perimeter * (semi_perimeter - sidea) * (semi_perimeter - sidea) * (semi_perimeter - sidea) ) return area def _lowerCAmelCase ( __snake_case : float , __snake_case : float ) -> float: if base < 0 or height < 0: raise ValueError('area_parallelogram() only accepts non-negative values' ) return base * height def _lowerCAmelCase ( __snake_case : float , __snake_case : float , __snake_case : float ) -> float: if basea < 0 or basea < 0 or height < 0: raise ValueError('area_trapezium() only accepts non-negative values' ) return 1 / 2 * (basea + basea) * height def _lowerCAmelCase ( __snake_case : float ) -> float: if radius < 0: raise ValueError('area_circle() only accepts non-negative values' ) return pi * radius**2 def _lowerCAmelCase ( __snake_case : float , __snake_case : float ) -> float: if radius_x < 0 or radius_y < 0: raise ValueError('area_ellipse() only accepts non-negative values' ) return pi * radius_x * radius_y def _lowerCAmelCase ( __snake_case : float , __snake_case : float ) -> float: if diagonal_a < 0 or diagonal_a < 0: raise ValueError('area_rhombus() only accepts non-negative values' ) return 1 / 2 * diagonal_a * diagonal_a def _lowerCAmelCase ( __snake_case : int , __snake_case : float ) -> float: if not isinstance(__snake_case , __snake_case ) or sides < 3: raise ValueError( 'area_reg_polygon() only accepts integers greater than or \ equal to three as number of sides' ) elif length < 0: raise ValueError( 'area_reg_polygon() only accepts non-negative values as \ length of a side' ) return (sides * length**2) / (4 * tan(pi / sides )) return (sides * length**2) / (4 * tan(pi / sides )) if __name__ == "__main__": import doctest doctest.testmod(verbose=True) # verbose so we can see methods missing tests print('''[DEMO] Areas of various geometric shapes: \n''') print(f"""Rectangle: {area_rectangle(10, 20) = }""") print(f"""Square: {area_square(10) = }""") print(f"""Triangle: {area_triangle(10, 10) = }""") print(f"""Triangle: {area_triangle_three_sides(5, 12, 13) = }""") print(f"""Parallelogram: {area_parallelogram(10, 20) = }""") print(f"""Rhombus: {area_rhombus(10, 20) = }""") print(f"""Trapezium: {area_trapezium(10, 20, 30) = }""") print(f"""Circle: {area_circle(20) = }""") print(f"""Ellipse: {area_ellipse(10, 20) = }""") print('''\nSurface Areas of various geometric shapes: \n''') print(f"""Cube: {surface_area_cube(20) = }""") print(f"""Cuboid: {surface_area_cuboid(10, 20, 30) = }""") print(f"""Sphere: {surface_area_sphere(20) = }""") print(f"""Hemisphere: {surface_area_hemisphere(20) = }""") print(f"""Cone: {surface_area_cone(10, 20) = }""") print(f"""Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }""") print(f"""Cylinder: {surface_area_cylinder(10, 20) = }""") print(f"""Torus: {surface_area_torus(20, 10) = }""") print(f"""Equilateral Triangle: {area_reg_polygon(3, 10) = }""") print(f"""Square: {area_reg_polygon(4, 10) = }""") print(f"""Reqular Pentagon: {area_reg_polygon(5, 10) = }""")
190
1
"""simple docstring""" import logging import os from dataclasses import dataclass, field from functools import partial from pathlib import Path from tempfile import TemporaryDirectory from typing import List, Optional import faiss import torch from datasets import Features, Sequence, Value, load_dataset from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser lowercase_ = logging.getLogger(__name__) torch.set_grad_enabled(False) lowercase_ = "cuda" if torch.cuda.is_available() else "cpu" def lowercase ( lowerCAmelCase__ : str , lowerCAmelCase__ : List[Any]=100 , lowerCAmelCase__ : Optional[int]=" " ) -> List[str]: __a = text.split(lowerCAmelCase__ ) return [character.join(text[i : i + n] ).strip() for i in range(0 , len(lowerCAmelCase__ ) , lowerCAmelCase__ )] def lowercase ( lowerCAmelCase__ : dict ) -> dict: __a , __a = [], [] for title, text in zip(documents['''title'''] , documents['''text'''] ): if text is not None: for passage in split_text(lowerCAmelCase__ ): titles.append(title if title is not None else '''''' ) texts.append(lowerCAmelCase__ ) return {"title": titles, "text": texts} def lowercase ( lowerCAmelCase__ : dict , lowerCAmelCase__ : DPRContextEncoder , lowerCAmelCase__ : DPRContextEncoderTokenizerFast ) -> dict: __a = ctx_tokenizer( documents['''title'''] , documents['''text'''] , truncation=lowerCAmelCase__ , padding='''longest''' , return_tensors='''pt''' )['''input_ids'''] __a = ctx_encoder(input_ids.to(device=lowerCAmelCase__ ) , return_dict=lowerCAmelCase__ ).pooler_output return {"embeddings": embeddings.detach().cpu().numpy()} def lowercase ( lowerCAmelCase__ : "RagExampleArguments" , lowerCAmelCase__ : "ProcessingArguments" , lowerCAmelCase__ : "IndexHnswArguments" , ) -> Tuple: ###################################### logger.info('''Step 1 - Create the dataset''' ) ###################################### # The dataset needed for RAG must have three columns: # - title (string): title of the document # - text (string): text of a passage of the document # - embeddings (array of dimension d): DPR representation of the passage # Let's say you have documents in tab-separated csv files with columns "title" and "text" assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file" # You can load a Dataset object this way __a = load_dataset( '''csv''' , data_files=[rag_example_args.csv_path] , split='''train''' , delimiter='''\t''' , column_names=['''title''', '''text'''] ) # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files # Then split the documents into passages of 100 words __a = dataset.map(lowerCAmelCase__ , batched=lowerCAmelCase__ , num_proc=processing_args.num_proc ) # And compute the embeddings __a = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=lowerCAmelCase__ ) __a = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ) __a = Features( {'''text''': Value('''string''' ), '''title''': Value('''string''' ), '''embeddings''': Sequence(Value('''float32''' ) )} ) # optional, save as float32 instead of float64 to save space __a = dataset.map( partial(lowerCAmelCase__ , ctx_encoder=lowerCAmelCase__ , ctx_tokenizer=lowerCAmelCase__ ) , batched=lowerCAmelCase__ , batch_size=processing_args.batch_size , features=lowerCAmelCase__ , ) # And finally save your dataset __a = os.path.join(rag_example_args.output_dir , '''my_knowledge_dataset''' ) dataset.save_to_disk(lowerCAmelCase__ ) # from datasets import load_from_disk # dataset = load_from_disk(passages_path) # to reload the dataset ###################################### logger.info('''Step 2 - Index the dataset''' ) ###################################### # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search __a = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT ) dataset.add_faiss_index('''embeddings''' , custom_index=lowerCAmelCase__ ) # And save the index __a = os.path.join(rag_example_args.output_dir , '''my_knowledge_dataset_hnsw_index.faiss''' ) dataset.get_index('''embeddings''' ).save(lowerCAmelCase__ ) # dataset.load_faiss_index("embeddings", index_path) # to reload the index @dataclass class __lowerCAmelCase : '''simple docstring''' __UpperCAmelCase : str = field( default=str(Path(__SCREAMING_SNAKE_CASE ).parent / 'test_run' / 'dummy-kb' / 'my_knowledge_dataset.csv' ) , metadata={'help': 'Path to a tab-separated csv file with columns \'title\' and \'text\''} , ) __UpperCAmelCase : Optional[str] = field( default=__SCREAMING_SNAKE_CASE , metadata={'help': 'Question that is passed as input to RAG. Default is \'What does Moses\' rod turn into ?\'.'} , ) __UpperCAmelCase : str = field( default='facebook/rag-sequence-nq' , metadata={'help': 'The RAG model to use. Either \'facebook/rag-sequence-nq\' or \'facebook/rag-token-nq\''} , ) __UpperCAmelCase : str = field( default='facebook/dpr-ctx_encoder-multiset-base' , metadata={ 'help': ( 'The DPR context encoder model to use. Either \'facebook/dpr-ctx_encoder-single-nq-base\' or' ' \'facebook/dpr-ctx_encoder-multiset-base\'' ) } , ) __UpperCAmelCase : Optional[str] = field( default=str(Path(__SCREAMING_SNAKE_CASE ).parent / 'test_run' / 'dummy-kb' ) , metadata={'help': 'Path to a directory where the dataset passages and the index will be saved'} , ) @dataclass class __lowerCAmelCase : '''simple docstring''' __UpperCAmelCase : Optional[int] = field( default=__SCREAMING_SNAKE_CASE , metadata={ 'help': 'The number of processes to use to split the documents into passages. Default is single process.' } , ) __UpperCAmelCase : int = field( default=1_6 , metadata={ 'help': 'The batch size to use when computing the passages embeddings using the DPR context encoder.' } , ) @dataclass class __lowerCAmelCase : '''simple docstring''' __UpperCAmelCase : int = field( default=7_6_8 , metadata={'help': 'The dimension of the embeddings to pass to the HNSW Faiss index.'} , ) __UpperCAmelCase : int = field( default=1_2_8 , metadata={ 'help': ( 'The number of bi-directional links created for every new element during the HNSW index construction.' ) } , ) if __name__ == "__main__": logging.basicConfig(level=logging.WARNING) logger.setLevel(logging.INFO) lowercase_ = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments)) lowercase_ , lowercase_ , lowercase_ = parser.parse_args_into_dataclasses() with TemporaryDirectory() as tmp_dir: lowercase_ = rag_example_args.output_dir or tmp_dir main(rag_example_args, processing_args, index_hnsw_args)
45
"""simple docstring""" from __future__ import annotations from typing import Any class _lowerCAmelCase : """simple docstring""" def __init__( self : Any, UpperCAmelCase__ : int ): __lowercase = num_of_nodes __lowercase = [] __lowercase = {} def _lowercase ( self : Optional[Any], UpperCAmelCase__ : int, UpperCAmelCase__ : int, UpperCAmelCase__ : int ): self.m_edges.append([u_node, v_node, weight] ) def _lowercase ( self : Union[str, Any], UpperCAmelCase__ : int ): if self.m_component[u_node] == u_node: return u_node return self.find_component(self.m_component[u_node] ) def _lowercase ( self : List[Any], UpperCAmelCase__ : int ): if self.m_component[u_node] != u_node: for k in self.m_component: __lowercase = self.find_component(UpperCAmelCase__ ) def _lowercase ( self : Union[str, Any], UpperCAmelCase__ : list[int], UpperCAmelCase__ : int, UpperCAmelCase__ : int ): if component_size[u_node] <= component_size[v_node]: __lowercase = v_node component_size[v_node] += component_size[u_node] self.set_component(UpperCAmelCase__ ) elif component_size[u_node] >= component_size[v_node]: __lowercase = self.find_component(UpperCAmelCase__ ) component_size[u_node] += component_size[v_node] self.set_component(UpperCAmelCase__ ) def _lowercase ( self : Any ): __lowercase = [] __lowercase = 0 __lowercase = [-1] * self.m_num_of_nodes # A list of components (initialized to all of the nodes) for node in range(self.m_num_of_nodes ): self.m_component.update({node: node} ) component_size.append(1 ) __lowercase = self.m_num_of_nodes while num_of_components > 1: for edge in self.m_edges: __lowercase ,__lowercase ,__lowercase = edge __lowercase = self.m_component[u] __lowercase = self.m_component[v] if u_component != v_component: for component in (u_component, v_component): if ( minimum_weight_edge[component] == -1 or minimum_weight_edge[component][2] > w ): __lowercase = [u, v, w] for edge in minimum_weight_edge: if isinstance(UpperCAmelCase__, UpperCAmelCase__ ): __lowercase ,__lowercase ,__lowercase = edge __lowercase = self.m_component[u] __lowercase = self.m_component[v] if u_component != v_component: mst_weight += w self.union(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) print(F"""Added edge [{u} - {v}]\nAdded weight: {w}\n""" ) num_of_components -= 1 __lowercase = [-1] * self.m_num_of_nodes print(F"""The total weight of the minimal spanning tree is: {mst_weight}""" ) def _A ( ) -> None: '''simple docstring''' if __name__ == "__main__": import doctest doctest.testmod()
17
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available lowerCamelCase = { "configuration_data2vec_audio": ["DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP", "Data2VecAudioConfig"], "configuration_data2vec_text": [ "DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Data2VecTextConfig", "Data2VecTextOnnxConfig", ], "configuration_data2vec_vision": [ "DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP", "Data2VecVisionConfig", "Data2VecVisionOnnxConfig", ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase = [ "DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST", "Data2VecAudioForAudioFrameClassification", "Data2VecAudioForCTC", "Data2VecAudioForSequenceClassification", "Data2VecAudioForXVector", "Data2VecAudioModel", "Data2VecAudioPreTrainedModel", ] lowerCamelCase = [ "DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST", "Data2VecTextForCausalLM", "Data2VecTextForMaskedLM", "Data2VecTextForMultipleChoice", "Data2VecTextForQuestionAnswering", "Data2VecTextForSequenceClassification", "Data2VecTextForTokenClassification", "Data2VecTextModel", "Data2VecTextPreTrainedModel", ] lowerCamelCase = [ "DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST", "Data2VecVisionForImageClassification", "Data2VecVisionForMaskedImageModeling", "Data2VecVisionForSemanticSegmentation", "Data2VecVisionModel", "Data2VecVisionPreTrainedModel", ] if is_tf_available(): lowerCamelCase = [ "TFData2VecVisionForImageClassification", "TFData2VecVisionForSemanticSegmentation", "TFData2VecVisionModel", "TFData2VecVisionPreTrainedModel", ] if TYPE_CHECKING: from .configuration_dataavec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecAudioConfig from .configuration_dataavec_text import ( DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecTextConfig, DataaVecTextOnnxConfig, ) from .configuration_dataavec_vision import ( DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecVisionConfig, DataaVecVisionOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_dataavec_audio import ( DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST, DataaVecAudioForAudioFrameClassification, DataaVecAudioForCTC, DataaVecAudioForSequenceClassification, DataaVecAudioForXVector, DataaVecAudioModel, DataaVecAudioPreTrainedModel, ) from .modeling_dataavec_text import ( DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST, DataaVecTextForCausalLM, DataaVecTextForMaskedLM, DataaVecTextForMultipleChoice, DataaVecTextForQuestionAnswering, DataaVecTextForSequenceClassification, DataaVecTextForTokenClassification, DataaVecTextModel, DataaVecTextPreTrainedModel, ) from .modeling_dataavec_vision import ( DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST, DataaVecVisionForImageClassification, DataaVecVisionForMaskedImageModeling, DataaVecVisionForSemanticSegmentation, DataaVecVisionModel, DataaVecVisionPreTrainedModel, ) if is_tf_available(): from .modeling_tf_dataavec_vision import ( TFDataaVecVisionForImageClassification, TFDataaVecVisionForSemanticSegmentation, TFDataaVecVisionModel, TFDataaVecVisionPreTrainedModel, ) else: import sys lowerCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
369
'''simple docstring''' from __future__ import annotations import math import random from typing import Any class _UpperCamelCase : '''simple docstring''' def __init__( self : List[Any]): '''simple docstring''' __lowercase =[] __lowercase =0 __lowercase =0 def __lowerCamelCase ( self : List[Any]): '''simple docstring''' return self.head == self.tail def __lowerCamelCase ( self : str , _lowerCAmelCase : Any): '''simple docstring''' self.data.append(_lowerCAmelCase) __lowercase =self.tail + 1 def __lowerCamelCase ( self : Dict): '''simple docstring''' __lowercase =self.data[self.head] __lowercase =self.head + 1 return ret def __lowerCamelCase ( self : Optional[Any]): '''simple docstring''' return self.tail - self.head def __lowerCamelCase ( self : Optional[int]): '''simple docstring''' print(self.data) print('**************') print(self.data[self.head : self.tail]) class _UpperCamelCase : '''simple docstring''' def __init__( self : Optional[int] , _lowerCAmelCase : Any): '''simple docstring''' __lowercase =data __lowercase =None __lowercase =None __lowercase =1 def __lowerCamelCase ( self : Any): '''simple docstring''' return self.data def __lowerCamelCase ( self : Optional[int]): '''simple docstring''' return self.left def __lowerCamelCase ( self : Tuple): '''simple docstring''' return self.right def __lowerCamelCase ( self : Dict): '''simple docstring''' return self.height def __lowerCamelCase ( self : int , _lowerCAmelCase : Any): '''simple docstring''' __lowercase =data def __lowerCamelCase ( self : Optional[Any] , _lowerCAmelCase : MyNode | None): '''simple docstring''' __lowercase =node def __lowerCamelCase ( self : List[Any] , _lowerCAmelCase : MyNode | None): '''simple docstring''' __lowercase =node def __lowerCamelCase ( self : Optional[int] , _lowerCAmelCase : int): '''simple docstring''' __lowercase =height def _A ( _lowerCAmelCase ): """simple docstring""" if node is None: return 0 return node.get_height() def _A ( _lowerCAmelCase , _lowerCAmelCase ): """simple docstring""" if a > b: return a return b def _A ( _lowerCAmelCase ): """simple docstring""" print('left rotation node:' , node.get_data() ) __lowercase =node.get_left() assert ret is not None node.set_left(ret.get_right() ) ret.set_right(_lowerCAmelCase ) __lowercase =my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1 node.set_height(_lowerCAmelCase ) __lowercase =my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1 ret.set_height(_lowerCAmelCase ) return ret def _A ( _lowerCAmelCase ): """simple docstring""" print('right rotation node:' , node.get_data() ) __lowercase =node.get_right() assert ret is not None node.set_right(ret.get_left() ) ret.set_left(_lowerCAmelCase ) __lowercase =my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1 node.set_height(_lowerCAmelCase ) __lowercase =my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1 ret.set_height(_lowerCAmelCase ) return ret def _A ( _lowerCAmelCase ): """simple docstring""" __lowercase =node.get_left() assert left_child is not None node.set_left(left_rotation(_lowerCAmelCase ) ) return right_rotation(_lowerCAmelCase ) def _A ( _lowerCAmelCase ): """simple docstring""" __lowercase =node.get_right() assert right_child is not None node.set_right(right_rotation(_lowerCAmelCase ) ) return left_rotation(_lowerCAmelCase ) def _A ( _lowerCAmelCase , _lowerCAmelCase ): """simple docstring""" if node is None: return MyNode(_lowerCAmelCase ) if data < node.get_data(): node.set_left(insert_node(node.get_left() , _lowerCAmelCase ) ) if ( get_height(node.get_left() ) - get_height(node.get_right() ) == 2 ): # an unbalance detected __lowercase =node.get_left() assert left_child is not None if ( data < left_child.get_data() ): # new node is the left child of the left child __lowercase =right_rotation(_lowerCAmelCase ) else: __lowercase =lr_rotation(_lowerCAmelCase ) else: node.set_right(insert_node(node.get_right() , _lowerCAmelCase ) ) if get_height(node.get_right() ) - get_height(node.get_left() ) == 2: __lowercase =node.get_right() assert right_child is not None if data < right_child.get_data(): __lowercase =rl_rotation(_lowerCAmelCase ) else: __lowercase =left_rotation(_lowerCAmelCase ) __lowercase =my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1 node.set_height(_lowerCAmelCase ) return node def _A ( _lowerCAmelCase ): """simple docstring""" while True: __lowercase =root.get_right() if right_child is None: break __lowercase =right_child return root.get_data() def _A ( _lowerCAmelCase ): """simple docstring""" while True: __lowercase =root.get_left() if left_child is None: break __lowercase =left_child return root.get_data() def _A ( _lowerCAmelCase , _lowerCAmelCase ): """simple docstring""" __lowercase =root.get_left() __lowercase =root.get_right() if root.get_data() == data: if left_child is not None and right_child is not None: __lowercase =get_left_most(_lowerCAmelCase ) root.set_data(_lowerCAmelCase ) root.set_right(del_node(_lowerCAmelCase , _lowerCAmelCase ) ) elif left_child is not None: __lowercase =left_child elif right_child is not None: __lowercase =right_child else: return None elif root.get_data() > data: if left_child is None: print('No such data' ) return root else: root.set_left(del_node(_lowerCAmelCase , _lowerCAmelCase ) ) else: # root.get_data() < data if right_child is None: return root else: root.set_right(del_node(_lowerCAmelCase , _lowerCAmelCase ) ) if get_height(_lowerCAmelCase ) - get_height(_lowerCAmelCase ) == 2: assert right_child is not None if get_height(right_child.get_right() ) > get_height(right_child.get_left() ): __lowercase =left_rotation(_lowerCAmelCase ) else: __lowercase =rl_rotation(_lowerCAmelCase ) elif get_height(_lowerCAmelCase ) - get_height(_lowerCAmelCase ) == -2: assert left_child is not None if get_height(left_child.get_left() ) > get_height(left_child.get_right() ): __lowercase =right_rotation(_lowerCAmelCase ) else: __lowercase =lr_rotation(_lowerCAmelCase ) __lowercase =my_max(get_height(root.get_right() ) , get_height(root.get_left() ) ) + 1 root.set_height(_lowerCAmelCase ) return root class _UpperCamelCase : '''simple docstring''' def __init__( self : Tuple): '''simple docstring''' __lowercase =None def __lowerCamelCase ( self : Optional[int]): '''simple docstring''' return get_height(self.root) def __lowerCamelCase ( self : List[str] , _lowerCAmelCase : Any): '''simple docstring''' print('insert:' + str(_lowerCAmelCase)) __lowercase =insert_node(self.root , _lowerCAmelCase) def __lowerCamelCase ( self : List[Any] , _lowerCAmelCase : Any): '''simple docstring''' print('delete:' + str(_lowerCAmelCase)) if self.root is None: print('Tree is empty!') return __lowercase =del_node(self.root , _lowerCAmelCase) def __str__( self : int , ): # a level traversale, gives a more intuitive look on the tree '''simple docstring''' __lowercase ='' __lowercase =MyQueue() q.push(self.root) __lowercase =self.get_height() if layer == 0: return output __lowercase =0 while not q.is_empty(): __lowercase =q.pop() __lowercase =' ' * int(math.pow(2 , layer - 1)) output += space if node is None: output += "*" q.push(_lowerCAmelCase) q.push(_lowerCAmelCase) else: output += str(node.get_data()) q.push(node.get_left()) q.push(node.get_right()) output += space __lowercase =cnt + 1 for i in range(1_0_0): if cnt == math.pow(2 , _lowerCAmelCase) - 1: __lowercase =layer - 1 if layer == 0: output += "\n*************************************" return output output += "\n" break output += "\n*************************************" return output def _A ( ): """simple docstring""" import doctest doctest.testmod() if __name__ == "__main__": _test() lowerCamelCase = AVLtree() lowerCamelCase = list(range(10)) random.shuffle(lst) for i in lst: t.insert(i) print(str(t)) random.shuffle(lst) for i in lst: t.del_node(i) print(str(t))
48
0
'''simple docstring''' from __future__ import annotations def _A ( A__ , A__ = None , A__ = None , A__ = False , ): """simple docstring""" __lowercase = cipher_alphabet or [chr(A__ ) for i in range(97 , 123 )] # If the argument is None or the user provided an empty dictionary if not frequencies_dict: # Frequencies of letters in the english language (how much they show up) __lowercase = { '''a''': 0.0_8_4_9_7, '''b''': 0.0_1_4_9_2, '''c''': 0.0_2_2_0_2, '''d''': 0.0_4_2_5_3, '''e''': 0.1_1_1_6_2, '''f''': 0.0_2_2_2_8, '''g''': 0.0_2_0_1_5, '''h''': 0.0_6_0_9_4, '''i''': 0.0_7_5_4_6, '''j''': 0.0_0_1_5_3, '''k''': 0.0_1_2_9_2, '''l''': 0.0_4_0_2_5, '''m''': 0.0_2_4_0_6, '''n''': 0.0_6_7_4_9, '''o''': 0.0_7_5_0_7, '''p''': 0.0_1_9_2_9, '''q''': 0.0_0_0_9_5, '''r''': 0.0_7_5_8_7, '''s''': 0.0_6_3_2_7, '''t''': 0.0_9_3_5_6, '''u''': 0.0_2_7_5_8, '''v''': 0.0_0_9_7_8, '''w''': 0.0_2_5_6_0, '''x''': 0.0_0_1_5_0, '''y''': 0.0_1_9_9_4, '''z''': 0.0_0_0_7_7, } else: # Custom frequencies dictionary __lowercase = frequencies_dict if not case_sensitive: __lowercase = ciphertext.lower() # Chi squared statistic values __lowercase = {} # cycle through all of the shifts for shift in range(len(A__ ) ): __lowercase = '''''' # decrypt the message with the shift for letter in ciphertext: try: # Try to index the letter in the alphabet __lowercase = (alphabet_letters.index(letter.lower() ) - shift) % len( A__ ) decrypted_with_shift += ( alphabet_letters[new_key].upper() if case_sensitive and letter.isupper() else alphabet_letters[new_key] ) except ValueError: # Append the character if it isn't in the alphabet decrypted_with_shift += letter __lowercase = 0.0 # Loop through each letter in the decoded message with the shift for letter in decrypted_with_shift: if case_sensitive: __lowercase = letter.lower() if letter in frequencies: # Get the amount of times the letter occurs in the message __lowercase = decrypted_with_shift.lower().count(A__ ) # Get the excepcted amount of times the letter should appear based # on letter frequencies __lowercase = frequencies[letter] * occurrences # Complete the chi squared statistic formula __lowercase = ((occurrences - expected) ** 2) / expected # Add the margin of error to the total chi squared statistic chi_squared_statistic += chi_letter_value else: if letter.lower() in frequencies: # Get the amount of times the letter occurs in the message __lowercase = decrypted_with_shift.count(A__ ) # Get the excepcted amount of times the letter should appear based # on letter frequencies __lowercase = frequencies[letter] * occurrences # Complete the chi squared statistic formula __lowercase = ((occurrences - expected) ** 2) / expected # Add the margin of error to the total chi squared statistic chi_squared_statistic += chi_letter_value # Add the data to the chi_squared_statistic_values dictionary __lowercase = ( chi_squared_statistic, decrypted_with_shift, ) # Get the most likely cipher by finding the cipher with the smallest chi squared # statistic def chi_squared_statistic_values_sorting_key(A__ ) -> tuple[float, str]: return chi_squared_statistic_values[key] __lowercase = min( A__ , key=A__ , ) # Get all the data from the most likely cipher (key, decoded message) ( ( __lowercase ) , ( __lowercase ) , ) = chi_squared_statistic_values[most_likely_cipher] # Return the data on the most likely shift return ( most_likely_cipher, most_likely_cipher_chi_squared_value, decoded_most_likely_cipher, )
104
from math import isqrt def snake_case ( snake_case__ :int) -> bool: return all(number % divisor != 0 for divisor in range(2 , isqrt(snake_case__) + 1)) def snake_case ( snake_case__ :int = 10**6) -> int: _A = 0 _A = 1 _A = 7 while prime_candidate < max_prime: primes_count += is_prime(snake_case__) cube_index += 1 prime_candidate += 6 * cube_index return primes_count if __name__ == "__main__": print(F'''{solution() = }''')
180
0
"""simple docstring""" from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline else: from .pipeline_unclip import UnCLIPPipeline from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline from .text_proj import UnCLIPTextProjModel
368
"""simple docstring""" def UpperCAmelCase ( UpperCamelCase__ = 100 ): """simple docstring""" A__ = (n * (n + 1) // 2) ** 2 A__ = n * (n + 1) * (2 * n + 1) // 6 return sum_cubes - sum_squares if __name__ == "__main__": print(F'''{solution() = }''')
154
0
"""simple docstring""" import json import logging import math import os import sys from dataclasses import dataclass, field from typing import Optional from datasets import Dataset, load_dataset import transformers from transformers import ( CONFIG_MAPPING, MODEL_FOR_MASKED_LM_MAPPING, AutoConfig, AutoModelForMaskedLM, AutoTokenizer, DataCollatorForWholeWordMask, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import get_last_checkpoint, is_main_process lowerCAmelCase : str = logging.getLogger(__name__) lowerCAmelCase : str = list(MODEL_FOR_MASKED_LM_MAPPING.keys()) lowerCAmelCase : Tuple = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class __magic_name__ : '''simple docstring''' __UpperCamelCase = field( default=UpperCamelCase__ , metadata={ "help": ( "The model checkpoint for weights initialization.Don't set if you want to train a model from scratch." ) } , ) __UpperCamelCase = field( default=UpperCamelCase__ , metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(UpperCamelCase__ )} , ) __UpperCamelCase = field( default=UpperCamelCase__ , metadata={ "help": ( "Override some existing default config settings when a model is trained from scratch. Example: " "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index" ) } , ) __UpperCamelCase = field( default=UpperCamelCase__ , metadata={"help": "Pretrained config name or path if not the same as model_name"} ) __UpperCamelCase = field( default=UpperCamelCase__ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) __UpperCamelCase = field( default=UpperCamelCase__ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , ) __UpperCamelCase = field( default=UpperCamelCase__ , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , ) __UpperCamelCase = field( default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , ) __UpperCamelCase = field( default=UpperCamelCase__ , metadata={ "help": ( "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." ) } , ) def _lowerCAmelCase ( self ): """simple docstring""" if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None): raise ValueError( """--config_overrides can't be used in combination with --config_name or --model_name_or_path""" ) @dataclass class __magic_name__ : '''simple docstring''' __UpperCamelCase = field( default=UpperCamelCase__ , metadata={"help": "The name of the dataset to use (via the datasets library)."} ) __UpperCamelCase = field( default=UpperCamelCase__ , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} ) __UpperCamelCase = field(default=UpperCamelCase__ , metadata={"help": "The input training data file (a text file)."} ) __UpperCamelCase = field( default=UpperCamelCase__ , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , ) __UpperCamelCase = field( default=UpperCamelCase__ , metadata={"help": "An optional input train ref data file for whole word masking in Chinese."} , ) __UpperCamelCase = field( default=UpperCamelCase__ , metadata={"help": "An optional input validation ref data file for whole word masking in Chinese."} , ) __UpperCamelCase = field( default=UpperCamelCase__ , metadata={"help": "Overwrite the cached training and evaluation sets"} ) __UpperCamelCase = field( default=5 , metadata={ "help": "The percentage of the train set used as validation set in case there's no validation split" } , ) __UpperCamelCase = field( default=UpperCamelCase__ , metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated. Default to the max input length of the model." ) } , ) __UpperCamelCase = field( default=UpperCamelCase__ , metadata={"help": "The number of processes to use for the preprocessing."} , ) __UpperCamelCase = field( default=0.1_5 , metadata={"help": "Ratio of tokens to mask for masked language modeling loss"} ) __UpperCamelCase = field( default=UpperCamelCase__ , metadata={ "help": ( "Whether to pad all samples to `max_seq_length`. " "If False, will pad the samples dynamically when batching to the maximum length in the batch." ) } , ) def _lowerCAmelCase ( self ): """simple docstring""" if self.train_file is not None: lowerCamelCase = self.train_file.split(""".""" )[-1] assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file." if self.validation_file is not None: lowerCamelCase = self.validation_file.split(""".""" )[-1] assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file." def a__ ( snake_case__ , snake_case__ ) -> Optional[Any]: with open(snake_case_ , """r""" , encoding="""utf-8""" ) as f: lowerCamelCase = [json.loads(snake_case_ ) for line in f.read().splitlines() if (len(snake_case_ ) > 0 and not line.isspace())] assert len(snake_case_ ) == len(snake_case_ ) lowerCamelCase = {c: dataset[c] for c in dataset.column_names} lowerCamelCase = refs return Dataset.from_dict(snake_case_ ) def a__ ( ) -> Any: lowerCamelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. lowerCamelCase , lowerCamelCase , lowerCamelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: lowerCamelCase , lowerCamelCase , lowerCamelCase = parser.parse_args_into_dataclasses() # Detecting last checkpoint. lowerCamelCase = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: lowerCamelCase = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F'Output directory ({training_args.output_dir}) already exists and is not empty. ' """Use --overwrite_output_dir to overcome.""" ) elif last_checkpoint is not None: logger.info( F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ' """the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" ) # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , ) logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN ) # Log on each process the small summary: logger.warning( F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}' + F'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info("""Training/evaluation parameters %s""" , snake_case_ ) # Set seed before initializing model. set_seed(training_args.seed ) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.dataset_name is not None: # Downloading and loading a dataset from the hub. lowerCamelCase = load_dataset(data_args.dataset_name , data_args.dataset_config_name ) if "validation" not in datasets.keys(): lowerCamelCase = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=F'train[:{data_args.validation_split_percentage}%]' , ) lowerCamelCase = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=F'train[{data_args.validation_split_percentage}%:]' , ) else: lowerCamelCase = {} if data_args.train_file is not None: lowerCamelCase = data_args.train_file if data_args.validation_file is not None: lowerCamelCase = data_args.validation_file lowerCamelCase = data_args.train_file.split(""".""" )[-1] if extension == "txt": lowerCamelCase = """text""" lowerCamelCase = load_dataset(snake_case_ , data_files=snake_case_ ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. lowerCamelCase = { """cache_dir""": model_args.cache_dir, """revision""": model_args.model_revision, """use_auth_token""": True if model_args.use_auth_token else None, } if model_args.config_name: lowerCamelCase = AutoConfig.from_pretrained(model_args.config_name , **snake_case_ ) elif model_args.model_name_or_path: lowerCamelCase = AutoConfig.from_pretrained(model_args.model_name_or_path , **snake_case_ ) else: lowerCamelCase = CONFIG_MAPPING[model_args.model_type]() logger.warning("""You are instantiating a new config instance from scratch.""" ) if model_args.config_overrides is not None: logger.info(F'Overriding config: {model_args.config_overrides}' ) config.update_from_string(model_args.config_overrides ) logger.info(F'New config: {config}' ) lowerCamelCase = { """cache_dir""": model_args.cache_dir, """use_fast""": model_args.use_fast_tokenizer, """revision""": model_args.model_revision, """use_auth_token""": True if model_args.use_auth_token else None, } if model_args.tokenizer_name: lowerCamelCase = AutoTokenizer.from_pretrained(model_args.tokenizer_name , **snake_case_ ) elif model_args.model_name_or_path: lowerCamelCase = AutoTokenizer.from_pretrained(model_args.model_name_or_path , **snake_case_ ) else: raise ValueError( """You are instantiating a new tokenizer from scratch. This is not supported by this script.""" """You can do it from another script, save it, and load it from here, using --tokenizer_name.""" ) if model_args.model_name_or_path: lowerCamelCase = AutoModelForMaskedLM.from_pretrained( model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=snake_case_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) else: logger.info("""Training new model from scratch""" ) lowerCamelCase = AutoModelForMaskedLM.from_config(snake_case_ ) model.resize_token_embeddings(len(snake_case_ ) ) # Preprocessing the datasets. # First we tokenize all the texts. if training_args.do_train: lowerCamelCase = datasets["""train"""].column_names else: lowerCamelCase = datasets["""validation"""].column_names lowerCamelCase = """text""" if """text""" in column_names else column_names[0] lowerCamelCase = """max_length""" if data_args.pad_to_max_length else False def tokenize_function(snake_case__ ): # Remove empty lines lowerCamelCase = [line for line in examples["""text"""] if len(snake_case_ ) > 0 and not line.isspace()] return tokenizer(examples["""text"""] , padding=snake_case_ , truncation=snake_case_ , max_length=data_args.max_seq_length ) lowerCamelCase = datasets.map( snake_case_ , batched=snake_case_ , num_proc=data_args.preprocessing_num_workers , remove_columns=[text_column_name] , load_from_cache_file=not data_args.overwrite_cache , ) # Add the chinese references if provided if data_args.train_ref_file is not None: lowerCamelCase = add_chinese_references(tokenized_datasets["""train"""] , data_args.train_ref_file ) if data_args.validation_ref_file is not None: lowerCamelCase = add_chinese_references( tokenized_datasets["""validation"""] , data_args.validation_ref_file ) # If we have ref files, need to avoid it removed by trainer lowerCamelCase = data_args.train_ref_file or data_args.validation_ref_file if has_ref: lowerCamelCase = False # Data collator # This one will take care of randomly masking the tokens. lowerCamelCase = DataCollatorForWholeWordMask(tokenizer=snake_case_ , mlm_probability=data_args.mlm_probability ) # Initialize our Trainer lowerCamelCase = Trainer( model=snake_case_ , args=snake_case_ , train_dataset=tokenized_datasets["""train"""] if training_args.do_train else None , eval_dataset=tokenized_datasets["""validation"""] if training_args.do_eval else None , tokenizer=snake_case_ , data_collator=snake_case_ , ) # Training if training_args.do_train: if last_checkpoint is not None: lowerCamelCase = last_checkpoint elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ): lowerCamelCase = model_args.model_name_or_path else: lowerCamelCase = None lowerCamelCase = trainer.train(resume_from_checkpoint=snake_case_ ) trainer.save_model() # Saves the tokenizer too for easy upload lowerCamelCase = os.path.join(training_args.output_dir , """train_results.txt""" ) if trainer.is_world_process_zero(): with open(snake_case_ , """w""" ) as writer: logger.info("""***** Train results *****""" ) for key, value in sorted(train_result.metrics.items() ): logger.info(F' {key} = {value}' ) writer.write(F'{key} = {value}\n' ) # Need to save the state, since Trainer.save_model saves only the tokenizer with the model trainer.state.save_to_json(os.path.join(training_args.output_dir , """trainer_state.json""" ) ) # Evaluation lowerCamelCase = {} if training_args.do_eval: logger.info("""*** Evaluate ***""" ) lowerCamelCase = trainer.evaluate() lowerCamelCase = math.exp(eval_output["""eval_loss"""] ) lowerCamelCase = perplexity lowerCamelCase = os.path.join(training_args.output_dir , """eval_results_mlm_wwm.txt""" ) if trainer.is_world_process_zero(): with open(snake_case_ , """w""" ) as writer: logger.info("""***** Eval results *****""" ) for key, value in sorted(results.items() ): logger.info(F' {key} = {value}' ) writer.write(F'{key} = {value}\n' ) return results def a__ ( snake_case__ ) -> Union[str, Any]: main() if __name__ == "__main__": main()
291
'''simple docstring''' import os import shutil import tempfile from unittest import TestCase from unittest.mock import patch import numpy as np from datasets import Dataset from transformers.models.realm.configuration_realm import RealmConfig from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer class __A ( UpperCamelCase__ ): def _lowercase (self : Optional[Any] ): UpperCAmelCase_ = tempfile.mkdtemp() UpperCAmelCase_ = 5 # Realm tok UpperCAmelCase_ = [ "[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "test", "question", "this", "is", "the", "first", "second", "third", "fourth", "fifth", "record", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest", ] UpperCAmelCase_ = os.path.join(self.tmpdirname , "realm_tokenizer" ) os.makedirs(__a , exist_ok=__a ) UpperCAmelCase_ = os.path.join(__a , VOCAB_FILES_NAMES["vocab_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) ) UpperCAmelCase_ = os.path.join(self.tmpdirname , "realm_block_records" ) os.makedirs(__a , exist_ok=__a ) def _lowercase (self : Optional[Any] ): return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , "realm_tokenizer" ) ) def _lowercase (self : Any ): shutil.rmtree(self.tmpdirname ) def _lowercase (self : List[Any] ): UpperCAmelCase_ = RealmConfig(num_block_records=self.num_block_records ) return config def _lowercase (self : List[str] ): UpperCAmelCase_ = Dataset.from_dict( { "id": ["0", "1"], "question": ["foo", "bar"], "answers": [["Foo", "Bar"], ["Bar"]], } ) return dataset def _lowercase (self : Any ): UpperCAmelCase_ = np.array( [ B"This is the first record", B"This is the second record", B"This is the third record", B"This is the fourth record", B"This is the fifth record", B"This is a longer longer longer record", ] , dtype=__a , ) return block_records def _lowercase (self : Union[str, Any] ): UpperCAmelCase_ = RealmRetriever( block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , ) return retriever def _lowercase (self : int ): UpperCAmelCase_ = self.get_config() UpperCAmelCase_ = self.get_dummy_retriever() UpperCAmelCase_ = retriever.tokenizer UpperCAmelCase_ = np.array([0, 3] , dtype="long" ) UpperCAmelCase_ = tokenizer(["Test question"] ).input_ids UpperCAmelCase_ = tokenizer( ["the fourth"] , add_special_tokens=__a , return_token_type_ids=__a , return_attention_mask=__a , ).input_ids UpperCAmelCase_ = config.reader_seq_len UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = retriever( __a , __a , answer_ids=__a , max_length=__a , return_tensors="np" ) self.assertEqual(len(__a ) , 2 ) self.assertEqual(len(__a ) , 2 ) self.assertEqual(len(__a ) , 2 ) self.assertEqual(concat_inputs.input_ids.shape , (2, 10) ) self.assertEqual(concat_inputs.attention_mask.shape , (2, 10) ) self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10) ) self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10) ) self.assertEqual( tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "first", "record", "[SEP]"] , ) self.assertEqual( tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "fourth", "record", "[SEP]"] , ) def _lowercase (self : List[Any] ): UpperCAmelCase_ = self.get_config() UpperCAmelCase_ = self.get_dummy_retriever() UpperCAmelCase_ = retriever.tokenizer UpperCAmelCase_ = np.array([0, 3, 5] , dtype="long" ) UpperCAmelCase_ = tokenizer(["Test question"] ).input_ids UpperCAmelCase_ = tokenizer( ["the fourth", "longer longer"] , add_special_tokens=__a , return_token_type_ids=__a , return_attention_mask=__a , ).input_ids UpperCAmelCase_ = config.reader_seq_len UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = retriever( __a , __a , answer_ids=__a , max_length=__a , return_tensors="np" ) self.assertEqual([False, True, True] , __a ) self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , __a ) self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , __a ) def _lowercase (self : Optional[Any] ): UpperCAmelCase_ = self.get_dummy_retriever() retriever.save_pretrained(os.path.join(self.tmpdirname , "realm_block_records" ) ) # Test local path UpperCAmelCase_ = retriever.from_pretrained(os.path.join(self.tmpdirname , "realm_block_records" ) ) self.assertEqual(retriever.block_records[0] , B"This is the first record" ) # Test mocked remote path with patch("transformers.models.realm.retrieval_realm.hf_hub_download" ) as mock_hf_hub_download: UpperCAmelCase_ = os.path.join( os.path.join(self.tmpdirname , "realm_block_records" ) , _REALM_BLOCK_RECORDS_FILENAME ) UpperCAmelCase_ = RealmRetriever.from_pretrained("google/realm-cc-news-pretrained-openqa" ) self.assertEqual(retriever.block_records[0] , B"This is the first record" )
1
0
'''simple docstring''' import unittest import numpy as np from diffusers import OnnxStableDiffusionInpaintPipelineLegacy from diffusers.utils.testing_utils import ( is_onnx_available, load_image, load_numpy, nightly, require_onnxruntime, require_torch_gpu, ) if is_onnx_available(): import onnxruntime as ort @nightly @require_onnxruntime @require_torch_gpu class lowercase__ ( unittest.TestCase ): @property def UpperCamelCase_ ( self : List[Any] ): '''simple docstring''' return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def UpperCamelCase_ ( self : int ): '''simple docstring''' _UpperCamelCase : Dict = ort.SessionOptions() _UpperCamelCase : Optional[Any] = False return options def UpperCamelCase_ ( self : int ): '''simple docstring''' _UpperCamelCase : List[Any] = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/in_paint/overture-creations-5sI6fQgYIuo.png' ) _UpperCamelCase : Tuple = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/in_paint/overture-creations-5sI6fQgYIuo_mask.png' ) _UpperCamelCase : Optional[int] = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy' ) # using the PNDM scheduler by default _UpperCamelCase : List[Any] = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained( 'CompVis/stable-diffusion-v1-4' ,revision='onnx' ,safety_checker=lowerCamelCase__ ,feature_extractor=lowerCamelCase__ ,provider=self.gpu_provider ,sess_options=self.gpu_options ,) pipe.set_progress_bar_config(disable=lowerCamelCase__ ) _UpperCamelCase : Any = 'A red cat sitting on a park bench' _UpperCamelCase : List[str] = np.random.RandomState(0 ) _UpperCamelCase : str = pipe( prompt=lowerCamelCase__ ,image=lowerCamelCase__ ,mask_image=lowerCamelCase__ ,strength=0.7_5 ,guidance_scale=7.5 ,num_inference_steps=15 ,generator=lowerCamelCase__ ,output_type='np' ,) _UpperCamelCase : Union[str, Any] = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image ).max() < 1E-2
236
'''simple docstring''' import os from shutil import copyfile from typing import List, Optional, Tuple from tokenizers import processors from ...tokenization_utils import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_nllb import NllbTokenizer else: snake_case_ : List[Any] = None snake_case_ : Dict = logging.get_logger(__name__) snake_case_ : Dict = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'} snake_case_ : List[str] = { 'vocab_file': { 'facebook/nllb-200-distilled-600M': ( 'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model' ), }, 'tokenizer_file': { 'facebook/nllb-200-distilled-600M': ( 'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json' ), }, } snake_case_ : str = { 'facebook/nllb-large-en-ro': 1024, 'facebook/nllb-200-distilled-600M': 1024, } # fmt: off snake_case_ : Optional[Any] = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn'] class lowercase__ ( lowercase ): lowercase__ = VOCAB_FILES_NAMES lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase__ = PRETRAINED_VOCAB_FILES_MAP lowercase__ = ["""input_ids""", """attention_mask"""] lowercase__ = NllbTokenizer lowercase__ = [] lowercase__ = [] def __init__( self : List[Any] ,lowerCamelCase__ : Optional[Any]=None ,lowerCamelCase__ : Dict=None ,lowerCamelCase__ : List[Any]="<s>" ,lowerCamelCase__ : Dict="</s>" ,lowerCamelCase__ : List[Any]="</s>" ,lowerCamelCase__ : Union[str, Any]="<s>" ,lowerCamelCase__ : List[Any]="<unk>" ,lowerCamelCase__ : Any="<pad>" ,lowerCamelCase__ : Optional[Any]="<mask>" ,lowerCamelCase__ : Optional[Any]=None ,lowerCamelCase__ : str=None ,lowerCamelCase__ : Tuple=None ,lowerCamelCase__ : Union[str, Any]=False ,**lowerCamelCase__ : Optional[Any] ,): '''simple docstring''' # Mask token behave like a normal word, i.e. include the space before it _UpperCamelCase : Optional[int] = AddedToken(lowerCamelCase__ ,lstrip=lowerCamelCase__ ,rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ ,lowerCamelCase__ ) else mask_token _UpperCamelCase : Union[str, Any] = legacy_behaviour super().__init__( vocab_file=lowerCamelCase__ ,tokenizer_file=lowerCamelCase__ ,bos_token=lowerCamelCase__ ,eos_token=lowerCamelCase__ ,sep_token=lowerCamelCase__ ,cls_token=lowerCamelCase__ ,unk_token=lowerCamelCase__ ,pad_token=lowerCamelCase__ ,mask_token=lowerCamelCase__ ,src_lang=lowerCamelCase__ ,tgt_lang=lowerCamelCase__ ,additional_special_tokens=lowerCamelCase__ ,legacy_behaviour=lowerCamelCase__ ,**lowerCamelCase__ ,) _UpperCamelCase : int = vocab_file _UpperCamelCase : int = False if not self.vocab_file else True _UpperCamelCase : Dict = FAIRSEQ_LANGUAGE_CODES.copy() if additional_special_tokens is not None: # Only add those special tokens if they are not already there. _additional_special_tokens.extend( [t for t in additional_special_tokens if t not in _additional_special_tokens] ) self.add_special_tokens({'additional_special_tokens': _additional_special_tokens} ) _UpperCamelCase : List[str] = { lang_code: self.convert_tokens_to_ids(lowerCamelCase__ ) for lang_code in FAIRSEQ_LANGUAGE_CODES } _UpperCamelCase : List[str] = src_lang if src_lang is not None else 'eng_Latn' _UpperCamelCase : int = self.convert_tokens_to_ids(self._src_lang ) _UpperCamelCase : Dict = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) @property def UpperCamelCase_ ( self : Tuple ): '''simple docstring''' return self._src_lang @src_lang.setter def UpperCamelCase_ ( self : Union[str, Any] ,lowerCamelCase__ : str ): '''simple docstring''' _UpperCamelCase : str = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def UpperCamelCase_ ( self : List[str] ,lowerCamelCase__ : List[int] ,lowerCamelCase__ : Optional[List[int]] = None ): '''simple docstring''' if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def UpperCamelCase_ ( self : Dict ,lowerCamelCase__ : List[int] ,lowerCamelCase__ : Optional[List[int]] = None ): '''simple docstring''' _UpperCamelCase : Dict = [self.sep_token_id] _UpperCamelCase : List[str] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def UpperCamelCase_ ( self : Union[str, Any] ,lowerCamelCase__ : str ,lowerCamelCase__ : str ,lowerCamelCase__ : Optional[str] ,lowerCamelCase__ : Optional[str] ,**lowerCamelCase__ : Dict ): '''simple docstring''' if src_lang is None or tgt_lang is None: raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' ) _UpperCamelCase : Tuple = src_lang _UpperCamelCase : Optional[Any] = self(lowerCamelCase__ ,add_special_tokens=lowerCamelCase__ ,return_tensors=lowerCamelCase__ ,**lowerCamelCase__ ) _UpperCamelCase : Tuple = self.convert_tokens_to_ids(lowerCamelCase__ ) _UpperCamelCase : str = tgt_lang_id return inputs def UpperCamelCase_ ( self : int ,lowerCamelCase__ : List[str] ,lowerCamelCase__ : str = "eng_Latn" ,lowerCamelCase__ : Optional[List[str]] = None ,lowerCamelCase__ : str = "fra_Latn" ,**lowerCamelCase__ : Union[str, Any] ,): '''simple docstring''' _UpperCamelCase : Tuple = src_lang _UpperCamelCase : List[str] = tgt_lang return super().prepare_seqaseq_batch(lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ) def UpperCamelCase_ ( self : Optional[int] ): '''simple docstring''' return self.set_src_lang_special_tokens(self.src_lang ) def UpperCamelCase_ ( self : Any ): '''simple docstring''' return self.set_tgt_lang_special_tokens(self.tgt_lang ) def UpperCamelCase_ ( self : str ,lowerCamelCase__ : List[Any] ): '''simple docstring''' _UpperCamelCase : int = self.convert_tokens_to_ids(lowerCamelCase__ ) if self.legacy_behaviour: _UpperCamelCase : Optional[Any] = [] _UpperCamelCase : int = [self.eos_token_id, self.cur_lang_code] else: _UpperCamelCase : List[Any] = [self.cur_lang_code] _UpperCamelCase : List[Any] = [self.eos_token_id] _UpperCamelCase : Union[str, Any] = self.convert_ids_to_tokens(self.prefix_tokens ) _UpperCamelCase : int = self.convert_ids_to_tokens(self.suffix_tokens ) _UpperCamelCase : Any = processors.TemplateProcessing( single=prefix_tokens_str + ['$A'] + suffix_tokens_str ,pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str ,special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str ,self.prefix_tokens + self.suffix_tokens ) ) ,) def UpperCamelCase_ ( self : int ,lowerCamelCase__ : str ): '''simple docstring''' _UpperCamelCase : Any = self.convert_tokens_to_ids(lowerCamelCase__ ) if self.legacy_behaviour: _UpperCamelCase : Tuple = [] _UpperCamelCase : str = [self.eos_token_id, self.cur_lang_code] else: _UpperCamelCase : Tuple = [self.cur_lang_code] _UpperCamelCase : Optional[Any] = [self.eos_token_id] _UpperCamelCase : int = self.convert_ids_to_tokens(self.prefix_tokens ) _UpperCamelCase : List[Any] = self.convert_ids_to_tokens(self.suffix_tokens ) _UpperCamelCase : Optional[int] = processors.TemplateProcessing( single=prefix_tokens_str + ['$A'] + suffix_tokens_str ,pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str ,special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str ,self.prefix_tokens + self.suffix_tokens ) ) ,) def UpperCamelCase_ ( self : Any ,lowerCamelCase__ : str ,lowerCamelCase__ : Optional[str] = None ): '''simple docstring''' if not self.can_save_slow_tokenizer: raise ValueError( 'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ' 'tokenizer.' ) if not os.path.isdir(lowerCamelCase__ ): logger.error(F'Vocabulary path ({save_directory}) should be a directory.' ) return _UpperCamelCase : List[Any] = os.path.join( lowerCamelCase__ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase__ ): copyfile(self.vocab_file ,lowerCamelCase__ ) return (out_vocab_file,)
236
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) __UpperCAmelCase = { "configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = ["LlamaTokenizer"] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = ["LlamaTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = [ "LlamaForCausalLM", "LlamaModel", "LlamaPreTrainedModel", "LlamaForSequenceClassification", ] if TYPE_CHECKING: from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_llama import LlamaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_llama_fast import LlamaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel else: import sys __UpperCAmelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
299
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available UpperCamelCase = { """configuration_nezha""": ["""NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """NezhaConfig"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = [ """NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST""", """NezhaForNextSentencePrediction""", """NezhaForMaskedLM""", """NezhaForPreTraining""", """NezhaForMultipleChoice""", """NezhaForQuestionAnswering""", """NezhaForSequenceClassification""", """NezhaForTokenClassification""", """NezhaModel""", """NezhaPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_nezha import ( NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST, NezhaForMaskedLM, NezhaForMultipleChoice, NezhaForNextSentencePrediction, NezhaForPreTraining, NezhaForQuestionAnswering, NezhaForSequenceClassification, NezhaForTokenClassification, NezhaModel, NezhaPreTrainedModel, ) else: import sys UpperCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
186
0
from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import Features, Value from .base import TaskTemplate @dataclass(frozen=__snake_case ) class SCREAMING_SNAKE_CASE__ (__snake_case ): # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization __lowerCamelCase : str = field(default="""summarization""" , metadata={"""include_in_asdict_even_if_is_default""": True} ) __lowerCamelCase : ClassVar[Features] = Features({"""text""": Value("""string""" )} ) __lowerCamelCase : ClassVar[Features] = Features({"""summary""": Value("""string""" )} ) __lowerCamelCase : str = "text" __lowerCamelCase : str = "summary" @property def snake_case_ ( self): return {self.text_column: "text", self.summary_column: "summary"}
216
from __future__ import annotations from collections.abc import Callable def snake_case__ ( SCREAMING_SNAKE_CASE_ : Callable[[int | float], int | float] , SCREAMING_SNAKE_CASE_ : int | float , SCREAMING_SNAKE_CASE_ : int | float , SCREAMING_SNAKE_CASE_ : int = 100 , ): '''simple docstring''' lowercase__ : Tuple = x_start lowercase__ : Tuple = fnc(SCREAMING_SNAKE_CASE_ ) lowercase__ : List[Any] = 0.0 for _ in range(SCREAMING_SNAKE_CASE_ ): # Approximates small segments of curve as linear and solve # for trapezoidal area lowercase__ : Any = (x_end - x_start) / steps + xa lowercase__ : Optional[Any] = fnc(SCREAMING_SNAKE_CASE_ ) area += abs(fxa + fxa ) * (xa - xa) / 2 # Increment step lowercase__ : Any = xa lowercase__ : str = fxa return area if __name__ == "__main__": def snake_case__ ( SCREAMING_SNAKE_CASE_ : Tuple ): '''simple docstring''' return x**3 + x**2 print('''f(x) = x^3 + x^2''') print('''The area between the curve, x = -5, x = 5 and the x axis is:''') snake_case_ = 10 while i <= 100_000: print(F'''with {i} steps: {trapezoidal_area(f, -5, 5, i)}''') i *= 10
216
1
import json import multiprocessing as mp import re from collections import defaultdict from functools import partial from typing import Dict, List, Optional, Set, Tuple, Type from datasets import Dataset from datasketch import MinHash, MinHashLSH from dpu_utils.utils.iterators import ThreadedIterator from tqdm import tqdm _A = re.compile('''[^A-Za-z_0-9]''') # parameters used in DuplicationIndex _A = 10 _A = 256 def lowerCamelCase__ ( a__ : List[str] ) -> Optional[MinHash]: if len(a__ ) < MIN_NUM_TOKENS: return None UpperCamelCase_ = MinHash(num_perm=a__ ) for token in set(a__ ): min_hash.update(token.encode() ) return min_hash def lowerCamelCase__ ( a__ : str ) -> Set[str]: return {t for t in NON_ALPHA.split(a__ ) if len(t.strip() ) > 0} class lowercase_ : def __init__( self , *, __UpperCamelCase = 0.85 , ): """simple docstring""" UpperCamelCase_ = duplication_jaccard_threshold UpperCamelCase_ = NUM_PERM UpperCamelCase_ = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm ) UpperCamelCase_ = defaultdict(__UpperCamelCase ) def lowerCamelCase_ ( self , __UpperCamelCase , __UpperCamelCase ): """simple docstring""" UpperCamelCase_ = self._index.query(__UpperCamelCase ) if code_key in self._index.keys: print(f'''Duplicate key {code_key}''' ) return self._index.insert(__UpperCamelCase , __UpperCamelCase ) if len(__UpperCamelCase ) > 0: for base_duplicate in close_duplicates: if base_duplicate in self._duplicate_clusters: self._duplicate_clusters[base_duplicate].add(__UpperCamelCase ) break else: self._duplicate_clusters[close_duplicates[0]].add(__UpperCamelCase ) def lowerCamelCase_ ( self ): """simple docstring""" UpperCamelCase_ = [] for base, duplicates in self._duplicate_clusters.items(): UpperCamelCase_ = [base] + list(__UpperCamelCase ) # reformat the cluster to be a list of dict UpperCamelCase_ = [{"""base_index""": el[0], """repo_name""": el[1], """path""": el[2]} for el in cluster] duplicate_clusters.append(__UpperCamelCase ) return duplicate_clusters def lowerCamelCase_ ( self , __UpperCamelCase ): """simple docstring""" UpperCamelCase_ = self.get_duplicate_clusters() with open(__UpperCamelCase , """w""" ) as f: json.dump(__UpperCamelCase , __UpperCamelCase ) def lowerCamelCase__ ( a__ : Optional[int] ) -> List[str]: UpperCamelCase_ , UpperCamelCase_ = element UpperCamelCase_ = get_min_hash([t for t in NON_ALPHA.split(data["""content"""] ) if len(t.strip() ) > 0] ) if min_hash is not None: return (index, data["repo_name"], data["path"]), min_hash def lowerCamelCase__ ( a__ : Type[Dataset] ) -> Optional[Any]: with mp.Pool() as pool: for data in pool.imap_unordered( _compute_min_hash , ThreadedIterator(a__ , max_queue_size=1_0000 ) , chunksize=100 , ): if data is not None: yield data def lowerCamelCase__ ( a__ : Type[Dataset] , a__ : float ) -> List[Any]: UpperCamelCase_ = DuplicationIndex(duplication_jaccard_threshold=a__ ) for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(a__ ) ) , max_queue_size=100 ) ): di.add(a__ , a__ ) # Returns a List[Cluster] where Cluster is List[str] with the filenames. return di.get_duplicate_clusters() def lowerCamelCase__ ( a__ : str , a__ : str ) -> float: UpperCamelCase_ = get_tokens(a__ ) UpperCamelCase_ = get_tokens(a__ ) return len(tokensa & tokensa ) / len(tokensa | tokensa ) _A = None def lowerCamelCase__ ( a__ : str , a__ : str ) -> Optional[Any]: UpperCamelCase_ = [] for elementa in cluster: UpperCamelCase_ = _shared_dataset[elementa["""base_index"""]]["""content"""] for elementa in extremes: UpperCamelCase_ = _shared_dataset[elementa["""base_index"""]]["""content"""] if jaccard_similarity(a__ , a__ ) >= jaccard_threshold: elementa["copies"] += 1 break else: UpperCamelCase_ = 1 extremes.append(a__ ) return extremes def lowerCamelCase__ ( a__ : str , a__ : Optional[int] , a__ : Optional[int] ) -> str: global _shared_dataset UpperCamelCase_ = dataset UpperCamelCase_ = [] UpperCamelCase_ = partial(_find_cluster_extremes_shared , jaccard_threshold=a__ ) with mp.Pool() as pool: for extremes in tqdm( pool.imap_unordered( a__ , a__ , ) , total=len(a__ ) , ): extremes_list.append(a__ ) return extremes_list def lowerCamelCase__ ( a__ : Type[Dataset] , a__ : float = 0.85 ) -> Tuple[Type[Dataset], List[List[Dict]]]: UpperCamelCase_ = make_duplicate_clusters(a__ , a__ ) UpperCamelCase_ = {x["""base_index"""] for cluster in duplicate_clusters for x in cluster} UpperCamelCase_ = {} UpperCamelCase_ = find_extremes(a__ , a__ , a__ ) for extremes in extremes_clusters: for element in extremes: UpperCamelCase_ = element UpperCamelCase_ = duplicate_indices - set(extreme_dict.keys() ) UpperCamelCase_ = dataset.filter(lambda a__ , a__ : idx not in remove_indices , with_indices=a__ ) # update duplicate_clusters for cluster in duplicate_clusters: for element in cluster: UpperCamelCase_ = element["""base_index"""] in extreme_dict if element["is_extreme"]: UpperCamelCase_ = extreme_dict[element["""base_index"""]]["""copies"""] print(f'''Original dataset size: {len(a__ )}''' ) print(f'''Number of duplicate clusters: {len(a__ )}''' ) print(f'''Files in duplicate cluster: {len(a__ )}''' ) print(f'''Unique files in duplicate cluster: {len(a__ )}''' ) print(f'''Filtered dataset size: {len(a__ )}''' ) return ds_filter, duplicate_clusters
122
import warnings from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class lowercase_ ( __SCREAMING_SNAKE_CASE ): A__ : int = ["""image_processor""", """tokenizer"""] A__ : Union[str, Any] = """LayoutLMv2ImageProcessor""" A__ : Optional[int] = ("""LayoutXLMTokenizer""", """LayoutXLMTokenizerFast""") def __init__( self , __UpperCamelCase=None , __UpperCamelCase=None , **__UpperCamelCase ): """simple docstring""" if "feature_extractor" in kwargs: warnings.warn( """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`""" """ instead.""" , __UpperCamelCase , ) UpperCamelCase_ = kwargs.pop("""feature_extractor""" ) UpperCamelCase_ = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("""You need to specify an `image_processor`.""" ) if tokenizer is None: raise ValueError("""You need to specify a `tokenizer`.""" ) super().__init__(__UpperCamelCase , __UpperCamelCase ) def __call__( self , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = True , __UpperCamelCase = False , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = 0 , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = False , __UpperCamelCase = False , __UpperCamelCase = False , __UpperCamelCase = False , __UpperCamelCase = True , __UpperCamelCase = None , **__UpperCamelCase , ): """simple docstring""" if self.image_processor.apply_ocr and (boxes is not None): raise ValueError( """You cannot provide bounding boxes """ """if you initialized the image processor with apply_ocr set to True.""" ) if self.image_processor.apply_ocr and (word_labels is not None): raise ValueError( """You cannot provide word labels if you initialized the image processor with apply_ocr set to True.""" ) if return_overflowing_tokens is True and return_offsets_mapping is False: raise ValueError("""You cannot return overflowing tokens without returning the offsets mapping.""" ) # first, apply the image processor UpperCamelCase_ = self.image_processor(images=__UpperCamelCase , return_tensors=__UpperCamelCase ) # second, apply the tokenizer if text is not None and self.image_processor.apply_ocr and text_pair is None: if isinstance(__UpperCamelCase , __UpperCamelCase ): UpperCamelCase_ = [text] # add batch dimension (as the image processor always adds a batch dimension) UpperCamelCase_ = features["""words"""] UpperCamelCase_ = self.tokenizer( text=text if text is not None else features["""words"""] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features["""boxes"""] , word_labels=__UpperCamelCase , add_special_tokens=__UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase , max_length=__UpperCamelCase , stride=__UpperCamelCase , pad_to_multiple_of=__UpperCamelCase , return_token_type_ids=__UpperCamelCase , return_attention_mask=__UpperCamelCase , return_overflowing_tokens=__UpperCamelCase , return_special_tokens_mask=__UpperCamelCase , return_offsets_mapping=__UpperCamelCase , return_length=__UpperCamelCase , verbose=__UpperCamelCase , return_tensors=__UpperCamelCase , **__UpperCamelCase , ) # add pixel values UpperCamelCase_ = features.pop("""pixel_values""" ) if return_overflowing_tokens is True: UpperCamelCase_ = self.get_overflowing_images(__UpperCamelCase , encoded_inputs["""overflow_to_sample_mapping"""] ) UpperCamelCase_ = images return encoded_inputs def lowerCamelCase_ ( self , __UpperCamelCase , __UpperCamelCase ): """simple docstring""" UpperCamelCase_ = [] for sample_idx in overflow_to_sample_mapping: images_with_overflow.append(images[sample_idx] ) if len(__UpperCamelCase ) != len(__UpperCamelCase ): raise ValueError( """Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got""" f''' {len(__UpperCamelCase )} and {len(__UpperCamelCase )}''' ) return images_with_overflow def lowerCamelCase_ ( self , *__UpperCamelCase , **__UpperCamelCase ): """simple docstring""" return self.tokenizer.batch_decode(*__UpperCamelCase , **__UpperCamelCase ) def lowerCamelCase_ ( self , *__UpperCamelCase , **__UpperCamelCase ): """simple docstring""" return self.tokenizer.decode(*__UpperCamelCase , **__UpperCamelCase ) @property def lowerCamelCase_ ( self ): """simple docstring""" return ["input_ids", "bbox", "attention_mask", "image"] @property def lowerCamelCase_ ( self ): """simple docstring""" warnings.warn( """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , __UpperCamelCase , ) return self.image_processor_class @property def lowerCamelCase_ ( self ): """simple docstring""" warnings.warn( """`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , __UpperCamelCase , ) return self.image_processor
122
1
"""simple docstring""" import hashlib import unittest from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available from transformers.pipelines import DepthEstimationPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_timm, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_torch_available(): import torch if is_vision_available(): from PIL import Image else: class __lowerCamelCase : '''simple docstring''' @staticmethod def _UpperCAmelCase ( *__UpperCAmelCase , **__UpperCAmelCase ) -> Tuple: pass def A_ ( _lowerCAmelCase : Image ): """simple docstring""" _a = hashlib.mda(image.tobytes() ) return m.hexdigest() @is_pipeline_test @require_vision @require_timm @require_torch class __lowerCamelCase ( unittest.TestCase ): '''simple docstring''' A_ : List[Any] = MODEL_FOR_DEPTH_ESTIMATION_MAPPING def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> str: _a = DepthEstimationPipeline(model=__UpperCAmelCase , image_processor=__UpperCAmelCase ) return depth_estimator, [ "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000039769.png", ] def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ) -> Dict: _a = depth_estimator('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) self.assertEqual({'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )} , __UpperCAmelCase ) import datasets _a = datasets.load_dataset('''hf-internal-testing/fixtures_image_utils''' , '''image''' , split='''test''' ) _a = depth_estimator( [ Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ), '''http://images.cocodataset.org/val2017/000000039769.jpg''', # RGBA dataset[0]['''file'''], # LA dataset[1]['''file'''], # L dataset[2]['''file'''], ] ) self.assertEqual( [ {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )}, {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )}, {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )}, {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )}, {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )}, ] , __UpperCAmelCase , ) @require_tf @unittest.skip('''Depth estimation is not implemented in TF''' ) def _UpperCAmelCase ( self ) -> Tuple: pass @slow @require_torch def _UpperCAmelCase ( self ) -> List[str]: _a = '''Intel/dpt-large''' _a = pipeline('''depth-estimation''' , model=__UpperCAmelCase ) _a = depth_estimator('''http://images.cocodataset.org/val2017/000000039769.jpg''' ) _a = hashimage(outputs['''depth'''] ) # This seems flaky. # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977") self.assertEqual(nested_simplify(outputs['''predicted_depth'''].max().item() ) , 29.304 ) self.assertEqual(nested_simplify(outputs['''predicted_depth'''].min().item() ) , 2.662 ) @require_torch def _UpperCAmelCase ( self ) -> List[Any]: # This is highly irregular to have no small tests. self.skipTest('''There is not hf-internal-testing tiny model for either GLPN nor DPT''' )
153
"""simple docstring""" import gc import unittest import numpy as np import torch from diffusers import StableDiffusionKDiffusionPipeline from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu enable_full_determinism() @slow @require_torch_gpu class __lowerCamelCase ( unittest.TestCase ): '''simple docstring''' def _UpperCAmelCase ( self ) -> int: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def _UpperCAmelCase ( self ) -> Dict: _a = StableDiffusionKDiffusionPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''' ) _a = sd_pipe.to(__UpperCAmelCase ) sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase ) sd_pipe.set_scheduler('''sample_euler''' ) _a = '''A painting of a squirrel eating a burger''' _a = torch.manual_seed(0 ) _a = sd_pipe([prompt] , generator=__UpperCAmelCase , guidance_scale=9.0 , num_inference_steps=20 , output_type='''np''' ) _a = output.images _a = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) _a = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def _UpperCAmelCase ( self ) -> List[str]: _a = StableDiffusionKDiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' ) _a = sd_pipe.to(__UpperCAmelCase ) sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase ) sd_pipe.set_scheduler('''sample_euler''' ) _a = '''A painting of a squirrel eating a burger''' _a = torch.manual_seed(0 ) _a = sd_pipe([prompt] , generator=__UpperCAmelCase , guidance_scale=9.0 , num_inference_steps=20 , output_type='''np''' ) _a = output.images _a = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) _a = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-1 def _UpperCAmelCase ( self ) -> str: _a = StableDiffusionKDiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' ) _a = sd_pipe.to(__UpperCAmelCase ) sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase ) sd_pipe.set_scheduler('''sample_dpmpp_2m''' ) _a = '''A painting of a squirrel eating a burger''' _a = torch.manual_seed(0 ) _a = sd_pipe( [prompt] , generator=__UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=15 , output_type='''np''' , use_karras_sigmas=__UpperCAmelCase , ) _a = output.images _a = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) _a = np.array( [0.11381689, 0.12112921, 0.1389457, 0.12549606, 0.1244964, 0.10831517, 0.11562866, 0.10867816, 0.10499048] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
153
1
from typing import Optional, Tuple, Union import flax import flax.linen as nn import jax import jax.numpy as jnp from flax.core.frozen_dict import FrozenDict from ..configuration_utils import ConfigMixin, flax_register_to_config from ..utils import BaseOutput from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps from .modeling_flax_utils import FlaxModelMixin from .unet_ad_blocks_flax import ( FlaxCrossAttnDownBlockaD, FlaxCrossAttnUpBlockaD, FlaxDownBlockaD, FlaxUNetMidBlockaDCrossAttn, FlaxUpBlockaD, ) @flax.struct.dataclass class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' snake_case_ = 42 @flax_register_to_config class UpperCAmelCase ( nn.Module , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): '''simple docstring''' snake_case_ = 32 snake_case_ = 4 snake_case_ = 4 snake_case_ = ( "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D", ) snake_case_ = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D") snake_case_ = False snake_case_ = (320, 640, 1280, 1280) snake_case_ = 2 snake_case_ = 8 snake_case_ = None snake_case_ = 1280 snake_case_ = 0.0 snake_case_ = False snake_case_ = jnp.floataa snake_case_ = True snake_case_ = 0 snake_case_ = False def UpperCamelCase_ ( self : Tuple ,A : jax.random.KeyArray ): # init input tensors __A = (1, self.in_channels, self.sample_size, self.sample_size) __A = jnp.zeros(A ,dtype=jnp.floataa ) __A = jnp.ones((1,) ,dtype=jnp.intaa ) __A = jnp.zeros((1, 1, self.cross_attention_dim) ,dtype=jnp.floataa ) __A , __A = jax.random.split(A ) __A = {"params": params_rng, "dropout": dropout_rng} return self.init(A ,A ,A ,A )["params"] def UpperCamelCase_ ( self : Dict ): __A = self.block_out_channels __A = block_out_channels[0] * 4 if self.num_attention_heads is not None: raise ValueError( "At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19." ) # If `num_attention_heads` is not defined (which is the case for most models) # it will default to `attention_head_dim`. This looks weird upon first reading it and it is. # The reason for this behavior is to correct for incorrectly named variables that were introduced # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131 # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking # which is why we correct for the naming here. __A = self.num_attention_heads or self.attention_head_dim # input __A = nn.Conv( block_out_channels[0] ,kernel_size=(3, 3) ,strides=(1, 1) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,) # time __A = FlaxTimesteps( block_out_channels[0] ,flip_sin_to_cos=self.flip_sin_to_cos ,freq_shift=self.config.freq_shift ) __A = FlaxTimestepEmbedding(A ,dtype=self.dtype ) __A = self.only_cross_attention if isinstance(A ,A ): __A = (only_cross_attention,) * len(self.down_block_types ) if isinstance(A ,A ): __A = (num_attention_heads,) * len(self.down_block_types ) # down __A = [] __A = block_out_channels[0] for i, down_block_type in enumerate(self.down_block_types ): __A = output_channel __A = block_out_channels[i] __A = i == len(A ) - 1 if down_block_type == "CrossAttnDownBlock2D": __A = FlaxCrossAttnDownBlockaD( in_channels=A ,out_channels=A ,dropout=self.dropout ,num_layers=self.layers_per_block ,num_attention_heads=num_attention_heads[i] ,add_downsample=not is_final_block ,use_linear_projection=self.use_linear_projection ,only_cross_attention=only_cross_attention[i] ,use_memory_efficient_attention=self.use_memory_efficient_attention ,dtype=self.dtype ,) else: __A = FlaxDownBlockaD( in_channels=A ,out_channels=A ,dropout=self.dropout ,num_layers=self.layers_per_block ,add_downsample=not is_final_block ,dtype=self.dtype ,) down_blocks.append(A ) __A = down_blocks # mid __A = FlaxUNetMidBlockaDCrossAttn( in_channels=block_out_channels[-1] ,dropout=self.dropout ,num_attention_heads=num_attention_heads[-1] ,use_linear_projection=self.use_linear_projection ,use_memory_efficient_attention=self.use_memory_efficient_attention ,dtype=self.dtype ,) # up __A = [] __A = list(reversed(A ) ) __A = list(reversed(A ) ) __A = list(reversed(A ) ) __A = reversed_block_out_channels[0] for i, up_block_type in enumerate(self.up_block_types ): __A = output_channel __A = reversed_block_out_channels[i] __A = reversed_block_out_channels[min(i + 1 ,len(A ) - 1 )] __A = i == len(A ) - 1 if up_block_type == "CrossAttnUpBlock2D": __A = FlaxCrossAttnUpBlockaD( in_channels=A ,out_channels=A ,prev_output_channel=A ,num_layers=self.layers_per_block + 1 ,num_attention_heads=reversed_num_attention_heads[i] ,add_upsample=not is_final_block ,dropout=self.dropout ,use_linear_projection=self.use_linear_projection ,only_cross_attention=only_cross_attention[i] ,use_memory_efficient_attention=self.use_memory_efficient_attention ,dtype=self.dtype ,) else: __A = FlaxUpBlockaD( in_channels=A ,out_channels=A ,prev_output_channel=A ,num_layers=self.layers_per_block + 1 ,add_upsample=not is_final_block ,dropout=self.dropout ,dtype=self.dtype ,) up_blocks.append(A ) __A = output_channel __A = up_blocks # out __A = nn.GroupNorm(num_groups=32 ,epsilon=1E-5 ) __A = nn.Conv( self.out_channels ,kernel_size=(3, 3) ,strides=(1, 1) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,) def __call__( self : Optional[Any] ,A : Tuple ,A : List[Any] ,A : List[str] ,A : Union[str, Any]=None ,A : Union[str, Any]=None ,A : bool = True ,A : bool = False ,): # 1. time if not isinstance(A ,jnp.ndarray ): __A = jnp.array([timesteps] ,dtype=jnp.intaa ) elif isinstance(A ,jnp.ndarray ) and len(timesteps.shape ) == 0: __A = timesteps.astype(dtype=jnp.floataa ) __A = jnp.expand_dims(A ,0 ) __A = self.time_proj(A ) __A = self.time_embedding(A ) # 2. pre-process __A = jnp.transpose(A ,(0, 2, 3, 1) ) __A = self.conv_in(A ) # 3. down __A = (sample,) for down_block in self.down_blocks: if isinstance(A ,A ): __A , __A = down_block(A ,A ,A ,deterministic=not train ) else: __A , __A = down_block(A ,A ,deterministic=not train ) down_block_res_samples += res_samples if down_block_additional_residuals is not None: __A = () for down_block_res_sample, down_block_additional_residual in zip( A ,A ): down_block_res_sample += down_block_additional_residual new_down_block_res_samples += (down_block_res_sample,) __A = new_down_block_res_samples # 4. mid __A = self.mid_block(A ,A ,A ,deterministic=not train ) if mid_block_additional_residual is not None: sample += mid_block_additional_residual # 5. up for up_block in self.up_blocks: __A = down_block_res_samples[-(self.layers_per_block + 1) :] __A = down_block_res_samples[: -(self.layers_per_block + 1)] if isinstance(A ,A ): __A = up_block( A ,temb=A ,encoder_hidden_states=A ,res_hidden_states_tuple=A ,deterministic=not train ,) else: __A = up_block(A ,temb=A ,res_hidden_states_tuple=A ,deterministic=not train ) # 6. post-process __A = self.conv_norm_out(A ) __A = nn.silu(A ) __A = self.conv_out(A ) __A = jnp.transpose(A ,(0, 3, 1, 2) ) if not return_dict: return (sample,) return FlaxUNetaDConditionOutput(sample=A )
15
from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices a__ = logging.get_logger(__name__) a__ = { '''google/bit-50''': '''https://huggingface.co/google/bit-50/resolve/main/config.json''', } class UpperCAmelCase_ ( __lowercase , __lowercase ): """simple docstring""" UpperCAmelCase__ : Any = "bit" UpperCAmelCase__ : Optional[int] = ["preactivation", "bottleneck"] UpperCAmelCase__ : Optional[Any] = ["SAME", "VALID"] def __init__( self , _a=3 , _a=6_4 , _a=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , _a=[3, 4, 6, 3] , _a="preactivation" , _a="relu" , _a=None , _a=3_2 , _a=0.0 , _a=False , _a=3_2 , _a=1 , _a=None , _a=None , **_a , ) -> Union[str, Any]: super().__init__(**_a ) if layer_type not in self.layer_types: raise ValueError(F"""layer_type={layer_type} is not one of {','.join(self.layer_types )}""" ) if global_padding is not None: if global_padding.upper() in self.supported_padding: _a : Any = global_padding.upper() else: raise ValueError(F"""Padding strategy {global_padding} not supported""" ) _a : Optional[int] = num_channels _a : List[Any] = embedding_size _a : Any = hidden_sizes _a : int = depths _a : Dict = layer_type _a : int = hidden_act _a : Optional[Any] = global_padding _a : Optional[Any] = num_groups _a : Union[str, Any] = drop_path_rate _a : Tuple = embedding_dynamic_padding _a : Union[str, Any] = output_stride _a : Any = width_factor _a : Any = ['''stem'''] + [F"""stage{idx}""" for idx in range(1 , len(_a ) + 1 )] _a , _a : List[str] = get_aligned_output_features_output_indices( out_features=_a , out_indices=_a , stage_names=self.stage_names )
235
0
"""simple docstring""" from __future__ import annotations from bisect import bisect_left from functools import total_ordering from heapq import merge @total_ordering class _a ( _A ): def __lt__( self : Union[str, Any], lowerCAmelCase__ : Any ) -> Any: '''simple docstring''' return self[-1] < other[-1] def __eq__( self : Union[str, Any], lowerCAmelCase__ : int ) -> str: '''simple docstring''' return self[-1] == other[-1] def a_ ( _lowercase ): _UpperCamelCase : List[Any] = [] # sort into stacks for element in collection: _UpperCamelCase : List[Any] = Stack([element] ) _UpperCamelCase : Tuple = bisect_left(lowercase__ , lowercase__ ) if i != len(lowercase__ ): stacks[i].append(lowercase__ ) else: stacks.append(lowercase__ ) # use a heap-based merge to merge stack efficiently _UpperCamelCase : int = merge(*(reversed(lowercase__ ) for stack in stacks) ) return collection if __name__ == "__main__": UpperCamelCase_ =input("""Enter numbers separated by a comma:\n""").strip() UpperCamelCase_ =[int(item) for item in user_input.split(""",""")] print(patience_sort(unsorted))
363
"""simple docstring""" from functools import lru_cache @lru_cache def a_ ( _lowercase ): if num < 0: raise ValueError('''Number should not be negative.''' ) return 1 if num in (0, 1) else num * factorial(num - 1 ) if __name__ == "__main__": import doctest doctest.testmod()
128
0
from typing import Dict, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, flip_channel_order, get_resize_output_image_size, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging if is_vision_available(): import PIL if is_torch_available(): import torch UpperCamelCase = logging.get_logger(__name__) class snake_case_ ( __A ): __A : Any = ["pixel_values"] def __init__( self : str , lowercase_ : bool = True , lowercase_ : Dict[str, int] = None , lowercase_ : PILImageResampling = PILImageResampling.BILINEAR , lowercase_ : bool = True , lowercase_ : Union[int, float] = 1 / 2_55 , lowercase_ : bool = True , lowercase_ : Dict[str, int] = None , lowercase_ : bool = True , **lowercase_ : Dict , ) -> None: super().__init__(**lowercase_ ) lowercase__ : List[str] = size if size is not None else {"shortest_edge": 2_24} lowercase__ : List[Any] = get_size_dict(lowercase_ , default_to_square=lowercase_ ) lowercase__ : Optional[Any] = crop_size if crop_size is not None else {"height": 2_56, "width": 2_56} lowercase__ : str = get_size_dict(lowercase_ , param_name="crop_size" ) lowercase__ : Tuple = do_resize lowercase__ : int = size lowercase__ : str = resample lowercase__ : Optional[Any] = do_rescale lowercase__ : Tuple = rescale_factor lowercase__ : List[str] = do_center_crop lowercase__ : List[str] = crop_size lowercase__ : Dict = do_flip_channel_order def __UpperCamelCase ( self : Optional[int] , lowercase_ : np.ndarray , lowercase_ : Dict[str, int] , lowercase_ : PILImageResampling = PIL.Image.BILINEAR , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : str , ) -> np.ndarray: lowercase__ : List[str] = get_size_dict(lowercase_ , default_to_square=lowercase_ ) if "shortest_edge" not in size: raise ValueError(F'''The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}''' ) lowercase__ : Optional[int] = get_resize_output_image_size(lowercase_ , size=size["shortest_edge"] , default_to_square=lowercase_ ) return resize(lowercase_ , size=lowercase_ , resample=lowercase_ , data_format=lowercase_ , **lowercase_ ) def __UpperCamelCase ( self : str , lowercase_ : np.ndarray , lowercase_ : Dict[str, int] , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : Any , ) -> np.ndarray: lowercase__ : Any = get_size_dict(lowercase_ ) if "height" not in size or "width" not in size: raise ValueError(F'''The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}''' ) return center_crop(lowercase_ , size=(size["height"], size["width"]) , data_format=lowercase_ , **lowercase_ ) def __UpperCamelCase ( self : int , lowercase_ : np.ndarray , lowercase_ : Union[int, float] , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : Dict , ) -> Optional[Any]: return rescale(lowercase_ , scale=lowercase_ , data_format=lowercase_ , **lowercase_ ) def __UpperCamelCase ( self : Dict , lowercase_ : np.ndarray , lowercase_ : Optional[Union[str, ChannelDimension]] = None ) -> np.ndarray: return flip_channel_order(lowercase_ , data_format=lowercase_ ) def __UpperCamelCase ( self : Any , lowercase_ : ImageInput , lowercase_ : bool = None , lowercase_ : Dict[str, int] = None , lowercase_ : PILImageResampling = None , lowercase_ : bool = None , lowercase_ : float = None , lowercase_ : bool = None , lowercase_ : Dict[str, int] = None , lowercase_ : bool = None , lowercase_ : Optional[Union[str, TensorType]] = None , lowercase_ : ChannelDimension = ChannelDimension.FIRST , **lowercase_ : Tuple , ) -> PIL.Image.Image: lowercase__ : int = do_resize if do_resize is not None else self.do_resize lowercase__ : Any = resample if resample is not None else self.resample lowercase__ : Optional[Any] = do_rescale if do_rescale is not None else self.do_rescale lowercase__ : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor lowercase__ : int = do_center_crop if do_center_crop is not None else self.do_center_crop lowercase__ : int = ( do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order ) lowercase__ : Dict = size if size is not None else self.size lowercase__ : int = get_size_dict(lowercase_ , default_to_square=lowercase_ ) lowercase__ : str = crop_size if crop_size is not None else self.crop_size lowercase__ : str = get_size_dict(lowercase_ , param_name="crop_size" ) lowercase__ : Optional[Any] = make_list_of_images(lowercase_ ) if not valid_images(lowercase_ ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if do_resize and size is None: raise ValueError("Size must be specified if do_resize is True." ) if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True." ) if do_center_crop and crop_size is None: raise ValueError("Crop size must be specified if do_center_crop is True." ) # All transformations expect numpy arrays. lowercase__ : Tuple = [to_numpy_array(lowercase_ ) for image in images] if do_resize: lowercase__ : int = [self.resize(image=lowercase_ , size=lowercase_ , resample=lowercase_ ) for image in images] if do_center_crop: lowercase__ : List[Any] = [self.center_crop(image=lowercase_ , size=lowercase_ ) for image in images] if do_rescale: lowercase__ : Any = [self.rescale(image=lowercase_ , scale=lowercase_ ) for image in images] # the pretrained checkpoints assume images are BGR, not RGB if do_flip_channel_order: lowercase__ : str = [self.flip_channel_order(image=lowercase_ ) for image in images] lowercase__ : str = [to_channel_dimension_format(lowercase_ , lowercase_ ) for image in images] lowercase__ : Optional[int] = {"pixel_values": images} return BatchFeature(data=lowercase_ , tensor_type=lowercase_ ) def __UpperCamelCase ( self : Optional[Any] , lowercase_ : int , lowercase_ : List[Tuple] = None ) -> List[Any]: lowercase__ : Optional[int] = outputs.logits # Resize logits and compute semantic segmentation maps if target_sizes is not None: if len(lowercase_ ) != len(lowercase_ ): raise ValueError( "Make sure that you pass in as many target sizes as the batch dimension of the logits" ) if is_torch_tensor(lowercase_ ): lowercase__ : Any = target_sizes.numpy() lowercase__ : Optional[Any] = [] for idx in range(len(lowercase_ ) ): lowercase__ : Tuple = torch.nn.functional.interpolate( logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="bilinear" , align_corners=lowercase_ ) lowercase__ : Union[str, Any] = resized_logits[0].argmax(dim=0 ) semantic_segmentation.append(lowercase_ ) else: lowercase__ : Optional[int] = logits.argmax(dim=1 ) lowercase__ : Optional[Any] = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )] return semantic_segmentation
87
"""simple docstring""" import argparse import torch from transformers import BertForMaskedLM if __name__ == "__main__": _a = argparse.ArgumentParser( description=( """Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned""" """ Distillation""" ) ) parser.add_argument("""--model_type""", default="""bert""", choices=["""bert"""]) parser.add_argument("""--model_name""", default="""bert-base-uncased""", type=str) parser.add_argument("""--dump_checkpoint""", default="""serialization_dir/tf_bert-base-uncased_0247911.pth""", type=str) parser.add_argument("""--vocab_transform""", action="""store_true""") _a = parser.parse_args() if args.model_type == "bert": _a = BertForMaskedLM.from_pretrained(args.model_name) _a = """bert""" else: raise ValueError("""args.model_type should be \"bert\".""") _a = model.state_dict() _a = {} for w in ["word_embeddings", "position_embeddings"]: _a = state_dict[F"""{prefix}.embeddings.{w}.weight"""] for w in ["weight", "bias"]: _a = state_dict[F"""{prefix}.embeddings.LayerNorm.{w}"""] _a = 0 for teacher_idx in [0, 2, 4, 7, 9, 11]: for w in ["weight", "bias"]: _a = state_dict[ F"""{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}""" ] _a = state_dict[ F"""{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}""" ] _a = state_dict[ F"""{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}""" ] _a = state_dict[ F"""{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}""" ] _a = state_dict[ F"""{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}""" ] _a = state_dict[ F"""{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}""" ] _a = state_dict[ F"""{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}""" ] _a = state_dict[ F"""{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}""" ] std_idx += 1 _a = state_dict["""cls.predictions.decoder.weight"""] _a = state_dict["""cls.predictions.bias"""] if args.vocab_transform: for w in ["weight", "bias"]: _a = state_dict[F"""cls.predictions.transform.dense.{w}"""] _a = state_dict[F"""cls.predictions.transform.LayerNorm.{w}"""] print(F"""N layers selected for distillation: {std_idx}""") print(F"""Number of params transferred for distillation: {len(compressed_sd.keys())}""") print(F"""Save transferred checkpoint to {args.dump_checkpoint}.""") torch.save(compressed_sd, args.dump_checkpoint)
194
0
import json import os import unittest from transformers.models.blenderbot_small.tokenization_blenderbot_small import ( VOCAB_FILES_NAMES, BlenderbotSmallTokenizer, ) from ...test_tokenization_common import TokenizerTesterMixin class SCREAMING_SNAKE_CASE__ ( lowercase__ , unittest.TestCase ): snake_case__ : Union[str, Any] = BlenderbotSmallTokenizer snake_case__ : Union[str, Any] = False def SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[Any]: super().setUp() a_ : Union[str, Any] = ['__start__', 'adapt', 'act', 'ap@@', 'te', '__end__', '__unk__'] a_ : Optional[Any] = dict(zip(SCREAMING_SNAKE_CASE__ , range(len(SCREAMING_SNAKE_CASE__ ) ) ) ) a_ : Tuple = ['#version: 0.2', 'a p', 't e</w>', 'ap t</w>', 'a d', 'ad apt</w>', 'a c', 'ac t</w>', ''] a_ : Dict = {'unk_token': '__unk__', 'bos_token': '__start__', 'eos_token': '__end__'} a_ : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) a_ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp: fp.write(json.dumps(SCREAMING_SNAKE_CASE__ ) + '\n' ) with open(self.merges_file , 'w' , encoding='utf-8' ) as fp: fp.write('\n'.join(SCREAMING_SNAKE_CASE__ ) ) def SCREAMING_SNAKE_CASE ( self : List[str] , **SCREAMING_SNAKE_CASE__ : Any ) -> List[Any]: kwargs.update(self.special_tokens_map ) return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : Dict , SCREAMING_SNAKE_CASE__ : Tuple ) -> Tuple: a_ : Tuple = 'adapt act apte' a_ : List[str] = 'adapt act apte' return input_text, output_text def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[str]: a_ : Union[str, Any] = BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) a_ : Any = 'adapt act apte' a_ : Optional[int] = ['adapt', 'act', 'ap@@', 'te'] a_ : List[str] = tokenizer.tokenize(SCREAMING_SNAKE_CASE__ ) self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) a_ : int = [tokenizer.bos_token] + tokens + [tokenizer.eos_token] a_ : str = [0, 1, 2, 3, 4, 5] self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : str ) -> int: a_ : int = BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M' ) assert tok('sam' ).input_ids == [1_3_8_4] a_ : Optional[int] = 'I am a small frog.' a_ : Tuple = tok([src_text] , padding=SCREAMING_SNAKE_CASE__ , truncation=SCREAMING_SNAKE_CASE__ )['input_ids'] a_ : str = tok.batch_decode(SCREAMING_SNAKE_CASE__ , skip_special_tokens=SCREAMING_SNAKE_CASE__ , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE__ )[0] assert src_text != decoded # I wish it did! assert decoded == "i am a small frog ." def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]: a_ : Any = BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M' ) a_ : Tuple = 'I am a small frog .' a_ : Optional[Any] = '.' a_ : Optional[int] = tok(SCREAMING_SNAKE_CASE__ )['input_ids'] a_ : Union[str, Any] = tok(SCREAMING_SNAKE_CASE__ )['input_ids'] assert encoded[-1] == encoded_dot[0]
120
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch import math from typing import Union import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import randn_tensor from .scheduling_utils import SchedulerMixin class SCREAMING_SNAKE_CASE__ ( lowercase__ , lowercase__ ): snake_case__ : Dict = 1 @register_to_config def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Tuple=2_0_0_0 , SCREAMING_SNAKE_CASE__ : Optional[Any]=0.1 , SCREAMING_SNAKE_CASE__ : Optional[int]=2_0 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=1E-3 ) -> Optional[int]: a_ : Tuple = None a_ : int = None a_ : Tuple = None def SCREAMING_SNAKE_CASE ( self : str , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Union[str, torch.device] = None ) -> List[str]: a_ : Tuple = torch.linspace(1 , self.config.sampling_eps , SCREAMING_SNAKE_CASE__ , device=SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : Dict , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Tuple=None ) -> Tuple: if self.timesteps is None: raise ValueError( '`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler' ) # TODO(Patrick) better comments + non-PyTorch # postprocess model score a_ : Tuple = ( -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min ) a_ : int = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) ) a_ : Dict = std.flatten() while len(std.shape ) < len(score.shape ): a_ : str = std.unsqueeze(-1 ) a_ : List[str] = -score / std # compute a_ : List[str] = -1.0 / len(self.timesteps ) a_ : Optional[int] = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min) a_ : Optional[Any] = beta_t.flatten() while len(beta_t.shape ) < len(x.shape ): a_ : List[str] = beta_t.unsqueeze(-1 ) a_ : Optional[Any] = -0.5 * beta_t * x a_ : Tuple = torch.sqrt(SCREAMING_SNAKE_CASE__ ) a_ : List[Any] = drift - diffusion**2 * score a_ : List[str] = x + drift * dt # add noise a_ : Optional[Any] = randn_tensor(x.shape , layout=x.layout , generator=SCREAMING_SNAKE_CASE__ , device=x.device , dtype=x.dtype ) a_ : Optional[Any] = x_mean + diffusion * math.sqrt(-dt ) * noise return x, x_mean def __len__( self : int ) -> Tuple: return self.config.num_train_timesteps
120
1
import unittest from datasets import load_dataset from transformers import BloomTokenizerFast from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class UpperCamelCase__ ( __lowercase ,unittest.TestCase ): _SCREAMING_SNAKE_CASE : str = None _SCREAMING_SNAKE_CASE : List[Any] = BloomTokenizerFast _SCREAMING_SNAKE_CASE : Dict = BloomTokenizerFast _SCREAMING_SNAKE_CASE : str = True _SCREAMING_SNAKE_CASE : Dict = False _SCREAMING_SNAKE_CASE : Optional[Any] = "tokenizer_file" _SCREAMING_SNAKE_CASE : Union[str, Any] = {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"} def lowerCAmelCase (self : int ): super().setUp() __a : List[Any] = BloomTokenizerFast.from_pretrained('''bigscience/tokenizer''' ) tokenizer.save_pretrained(self.tmpdirname ) def lowerCAmelCase (self : Optional[int] , **snake_case_ : str ): kwargs.update(self.special_tokens_map ) return BloomTokenizerFast.from_pretrained(self.tmpdirname , **snake_case_ ) def lowerCAmelCase (self : int ): __a : List[Any] = self.get_rust_tokenizer() __a : Any = ['''The quick brown fox</s>''', '''jumps over the lazy dog</s>'''] __a : Optional[int] = [[2_1_7_5, 2_3_7_1_4, 7_3_1_7_3, 1_4_4_2_5_2, 2], [7_7, 1_3_2_6_1_9, 3_4_7_8, 3_6_8, 1_0_9_5_8_6, 3_5_4_3_3, 2]] __a : str = tokenizer.batch_encode_plus(snake_case_ )['''input_ids'''] self.assertListEqual(snake_case_ , snake_case_ ) __a : List[str] = tokenizer.batch_decode(snake_case_ ) self.assertListEqual(snake_case_ , snake_case_ ) def lowerCAmelCase (self : Optional[int] , snake_case_ : List[str]=6 ): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ): __a : Optional[Any] = self.rust_tokenizer_class.from_pretrained(snake_case_ , **snake_case_ ) # tokenizer_r.pad_token = None # Hotfixing padding = None # Simple input __a : List[str] = '''This is a simple input''' __a : Optional[Any] = ['''This is a simple input 1''', '''This is a simple input 2'''] __a : Any = ('''This is a simple input''', '''This is a pair''') __a : Any = [ ('''This is a simple input 1''', '''This is a simple input 2'''), ('''This is a simple pair 1''', '''This is a simple pair 2'''), ] # Simple input tests try: tokenizer_r.encode(snake_case_ , max_length=snake_case_ ) tokenizer_r.encode_plus(snake_case_ , max_length=snake_case_ ) tokenizer_r.batch_encode_plus(snake_case_ , max_length=snake_case_ ) tokenizer_r.encode(snake_case_ , max_length=snake_case_ ) tokenizer_r.batch_encode_plus(snake_case_ , max_length=snake_case_ ) except ValueError: self.fail('''Bloom Tokenizer should be able to deal with padding''' ) __a : List[Any] = None # Hotfixing padding = None self.assertRaises(snake_case_ , tokenizer_r.encode , snake_case_ , max_length=snake_case_ , padding='''max_length''' ) # Simple input self.assertRaises(snake_case_ , tokenizer_r.encode_plus , snake_case_ , max_length=snake_case_ , padding='''max_length''' ) # Simple input self.assertRaises( snake_case_ , tokenizer_r.batch_encode_plus , snake_case_ , max_length=snake_case_ , padding='''max_length''' , ) # Pair input self.assertRaises(snake_case_ , tokenizer_r.encode , snake_case_ , max_length=snake_case_ , padding='''max_length''' ) # Pair input self.assertRaises(snake_case_ , tokenizer_r.encode_plus , snake_case_ , max_length=snake_case_ , padding='''max_length''' ) # Pair input self.assertRaises( snake_case_ , tokenizer_r.batch_encode_plus , snake_case_ , max_length=snake_case_ , padding='''max_length''' , ) def lowerCAmelCase (self : Optional[Any] ): __a : Dict = self.get_rust_tokenizer() __a : Optional[Any] = load_dataset('''xnli''' , '''all_languages''' , split='''test''' , streaming=snake_case_ ) __a : List[Any] = next(iter(snake_case_ ) )['''premise'''] # pick up one data __a : Tuple = list(sample_data.values() ) __a : Any = list(map(tokenizer.encode , snake_case_ ) ) __a : Optional[Any] = [tokenizer.decode(snake_case_ , clean_up_tokenization_spaces=snake_case_ ) for x in output_tokens] self.assertListEqual(snake_case_ , snake_case_ ) def lowerCAmelCase (self : int ): # The test has to be overriden because BLOOM uses ALiBi positional embeddings that does not have # any sequence length constraints. This test of the parent class will fail since it relies on the # maximum sequence length of the positoonal embeddings. self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 ) self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
216
import argparse import os import re import tensorflow as tf import torch from transformers import BertConfig, BertModel from transformers.utils import logging logging.set_verbosity_info() lowercase__ =logging.get_logger(__name__) def __UpperCamelCase ( lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : int , lowerCAmelCase__ : Dict ): __a : List[str] = os.path.abspath(lowerCAmelCase__ ) logger.info(f"Converting TensorFlow checkpoint from {tf_path}" ) # Load weights from TF model __a : Tuple = tf.train.list_variables(lowerCAmelCase__ ) __a : Optional[Any] = [] __a : Union[str, Any] = [] __a : str = [] for full_name, shape in init_vars: # logger.info(f"Loading TF weight {name} with shape {shape}") __a : Any = full_name.split('''/''' ) if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]: logger.info(f"Skipping non-model layer {full_name}" ) continue if "optimizer" in full_name: logger.info(f"Skipping optimization layer {full_name}" ) continue if name[0] == "model": # ignore initial 'model' __a : Any = name[1:] # figure out how many levels deep the name is __a : List[Any] = 0 for _name in name: if _name.startswith('''layer_with_weights''' ): depth += 1 else: break layer_depth.append(lowerCAmelCase__ ) # read data __a : Tuple = tf.train.load_variable(lowerCAmelCase__ , lowerCAmelCase__ ) names.append('''/'''.join(lowerCAmelCase__ ) ) arrays.append(lowerCAmelCase__ ) logger.info(f"Read a total of {len(lowerCAmelCase__ ):,} layers" ) # Sanity check if len(set(lowerCAmelCase__ ) ) != 1: raise ValueError(f"Found layer names with different depths (layer depth {list(set(lowerCAmelCase__ ) )})" ) __a : int = list(set(lowerCAmelCase__ ) )[0] if layer_depth != 1: raise ValueError( '''The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP''' ''' heads.''' ) # convert layers logger.info('''Converting weights...''' ) for full_name, array in zip(lowerCAmelCase__ , lowerCAmelCase__ ): __a : int = full_name.split('''/''' ) __a : Tuple = model __a : Dict = [] for i, m_name in enumerate(lowerCAmelCase__ ): if m_name == ".ATTRIBUTES": # variable names end with .ATTRIBUTES/VARIABLE_VALUE break if m_name.startswith('''layer_with_weights''' ): __a : Union[str, Any] = int(m_name.split('''-''' )[-1] ) if layer_num <= 2: # embedding layers # layer_num 0: word_embeddings # layer_num 1: position_embeddings # layer_num 2: token_type_embeddings continue elif layer_num == 3: # embedding LayerNorm trace.extend(['''embeddings''', '''LayerNorm'''] ) __a : Union[str, Any] = getattr(lowerCAmelCase__ , '''embeddings''' ) __a : List[str] = getattr(lowerCAmelCase__ , '''LayerNorm''' ) elif layer_num > 3 and layer_num < config.num_hidden_layers + 4: # encoder layers trace.extend(['''encoder''', '''layer''', str(layer_num - 4 )] ) __a : Dict = getattr(lowerCAmelCase__ , '''encoder''' ) __a : Union[str, Any] = getattr(lowerCAmelCase__ , '''layer''' ) __a : Any = pointer[layer_num - 4] elif layer_num == config.num_hidden_layers + 4: # pooler layer trace.extend(['''pooler''', '''dense'''] ) __a : Any = getattr(lowerCAmelCase__ , '''pooler''' ) __a : Optional[int] = getattr(lowerCAmelCase__ , '''dense''' ) elif m_name == "embeddings": trace.append('''embeddings''' ) __a : int = getattr(lowerCAmelCase__ , '''embeddings''' ) if layer_num == 0: trace.append('''word_embeddings''' ) __a : Optional[int] = getattr(lowerCAmelCase__ , '''word_embeddings''' ) elif layer_num == 1: trace.append('''position_embeddings''' ) __a : List[str] = getattr(lowerCAmelCase__ , '''position_embeddings''' ) elif layer_num == 2: trace.append('''token_type_embeddings''' ) __a : Optional[Any] = getattr(lowerCAmelCase__ , '''token_type_embeddings''' ) else: raise ValueError(f"Unknown embedding layer with name {full_name}" ) trace.append('''weight''' ) __a : Tuple = getattr(lowerCAmelCase__ , '''weight''' ) elif m_name == "_attention_layer": # self-attention layer trace.extend(['''attention''', '''self'''] ) __a : Optional[Any] = getattr(lowerCAmelCase__ , '''attention''' ) __a : Union[str, Any] = getattr(lowerCAmelCase__ , '''self''' ) elif m_name == "_attention_layer_norm": # output attention norm trace.extend(['''attention''', '''output''', '''LayerNorm'''] ) __a : int = getattr(lowerCAmelCase__ , '''attention''' ) __a : List[Any] = getattr(lowerCAmelCase__ , '''output''' ) __a : List[Any] = getattr(lowerCAmelCase__ , '''LayerNorm''' ) elif m_name == "_attention_output_dense": # output attention dense trace.extend(['''attention''', '''output''', '''dense'''] ) __a : Optional[int] = getattr(lowerCAmelCase__ , '''attention''' ) __a : Optional[Any] = getattr(lowerCAmelCase__ , '''output''' ) __a : Any = getattr(lowerCAmelCase__ , '''dense''' ) elif m_name == "_output_dense": # output dense trace.extend(['''output''', '''dense'''] ) __a : Tuple = getattr(lowerCAmelCase__ , '''output''' ) __a : str = getattr(lowerCAmelCase__ , '''dense''' ) elif m_name == "_output_layer_norm": # output dense trace.extend(['''output''', '''LayerNorm'''] ) __a : int = getattr(lowerCAmelCase__ , '''output''' ) __a : str = getattr(lowerCAmelCase__ , '''LayerNorm''' ) elif m_name == "_key_dense": # attention key trace.append('''key''' ) __a : Union[str, Any] = getattr(lowerCAmelCase__ , '''key''' ) elif m_name == "_query_dense": # attention query trace.append('''query''' ) __a : Union[str, Any] = getattr(lowerCAmelCase__ , '''query''' ) elif m_name == "_value_dense": # attention value trace.append('''value''' ) __a : Union[str, Any] = getattr(lowerCAmelCase__ , '''value''' ) elif m_name == "_intermediate_dense": # attention intermediate dense trace.extend(['''intermediate''', '''dense'''] ) __a : Optional[Any] = getattr(lowerCAmelCase__ , '''intermediate''' ) __a : Optional[int] = getattr(lowerCAmelCase__ , '''dense''' ) elif m_name == "_output_layer_norm": # output layer norm trace.append('''output''' ) __a : int = getattr(lowerCAmelCase__ , '''output''' ) # weights & biases elif m_name in ["bias", "beta"]: trace.append('''bias''' ) __a : Dict = getattr(lowerCAmelCase__ , '''bias''' ) elif m_name in ["kernel", "gamma"]: trace.append('''weight''' ) __a : List[Any] = getattr(lowerCAmelCase__ , '''weight''' ) else: logger.warning(f"Ignored {m_name}" ) # for certain layers reshape is necessary __a : List[str] = '''.'''.join(lowerCAmelCase__ ) if re.match(R'''(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)''' , lowerCAmelCase__ ) or re.match( R'''(\S+)\.attention\.output\.dense\.weight''' , lowerCAmelCase__ ): __a : str = array.reshape(pointer.data.shape ) if "kernel" in full_name: __a : Optional[Any] = array.transpose() if pointer.shape == array.shape: __a : str = torch.from_numpy(lowerCAmelCase__ ) else: raise ValueError( f"Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:" f" {array.shape}" ) logger.info(f"Successfully set variable {full_name} to PyTorch layer {trace}" ) return model def __UpperCamelCase ( lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Optional[Any] ): # Instantiate model logger.info(f"Loading model based on config from {config_path}..." ) __a : Dict = BertConfig.from_json_file(lowerCAmelCase__ ) __a : int = BertModel(lowerCAmelCase__ ) # Load weights from checkpoint logger.info(f"Loading weights from checkpoint {tf_checkpoint_path}..." ) load_tfa_weights_in_bert(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) # Save pytorch-model logger.info(f"Saving PyTorch model to {pytorch_dump_path}..." ) torch.save(model.state_dict() , lowerCAmelCase__ ) if __name__ == "__main__": lowercase__ =argparse.ArgumentParser() parser.add_argument( '--tf_checkpoint_path', type=str, required=True, help='Path to the TensorFlow 2.x checkpoint path.' ) parser.add_argument( '--bert_config_file', type=str, required=True, help='The config json file corresponding to the BERT model. This specifies the model architecture.', ) parser.add_argument( '--pytorch_dump_path', type=str, required=True, help='Path to the output PyTorch model (must include filename).', ) lowercase__ =parser.parse_args() convert_tfa_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
216
1
'''simple docstring''' def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ ): UpperCAmelCase__ : Any = 0 UpperCAmelCase__ : Tuple = len(_A ) - 1 while left <= right: # avoid divided by 0 during interpolation if sorted_collection[left] == sorted_collection[right]: if sorted_collection[left] == item: return left else: return None UpperCAmelCase__ : Tuple = left + ((item - sorted_collection[left]) * (right - left)) // ( sorted_collection[right] - sorted_collection[left] ) # out of range check if point < 0 or point >= len(_A ): return None UpperCAmelCase__ : List[str] = sorted_collection[point] if current_item == item: return point else: if point < left: UpperCAmelCase__ : Union[str, Any] = left UpperCAmelCase__ : Any = point elif point > right: UpperCAmelCase__ : Optional[Any] = right UpperCAmelCase__ : List[Any] = point else: if item < current_item: UpperCAmelCase__ : Any = point - 1 else: UpperCAmelCase__ : Tuple = point + 1 return None def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): # avoid divided by 0 during interpolation if sorted_collection[left] == sorted_collection[right]: if sorted_collection[left] == item: return left else: return None UpperCAmelCase__ : Tuple = left + ((item - sorted_collection[left]) * (right - left)) // ( sorted_collection[right] - sorted_collection[left] ) # out of range check if point < 0 or point >= len(_A ): return None if sorted_collection[point] == item: return point elif point < left: return interpolation_search_by_recursion(_A , _A , _A , _A ) elif point > right: return interpolation_search_by_recursion(_A , _A , _A , _A ) else: if sorted_collection[point] > item: return interpolation_search_by_recursion( _A , _A , _A , point - 1 ) else: return interpolation_search_by_recursion( _A , _A , point + 1 , _A ) def _UpperCamelCase ( UpperCamelCase__ ): if collection != sorted(_A ): raise ValueError("""Collection must be ascending sorted""" ) return True if __name__ == "__main__": import sys __A =0 if debug == 1: __A =[10, 30, 40, 45, 50, 66, 77, 93] try: __assert_sorted(collection) except ValueError: sys.exit('Sequence must be ascending sorted to apply interpolation search') __A =67 __A =interpolation_search(collection, target) if result is not None: print(f"""{target} found at positions: {result}""") else: print('Not found')
358
'''simple docstring''' import numpy class _snake_case : def __init__( self , _lowerCamelCase , _lowerCamelCase): UpperCAmelCase__ : Dict = input_array # Random initial weights are assigned where first argument is the # number of nodes in previous layer and second argument is the # number of nodes in the next layer. # Random initial weights are assigned. # self.input_array.shape[1] is used to represent number of nodes in input layer. # First hidden layer consists of 4 nodes. UpperCAmelCase__ : Optional[Any] = numpy.random.rand( self.input_array.shape[1] , 4) # Random initial values for the first hidden layer. # First hidden layer has 4 nodes. # Second hidden layer has 3 nodes. UpperCAmelCase__ : Optional[int] = numpy.random.rand( 4 , 3) # Random initial values for the second hidden layer. # Second hidden layer has 3 nodes. # Output layer has 1 node. UpperCAmelCase__ : Any = numpy.random.rand(3 , 1) # Real output values provided. UpperCAmelCase__ : Tuple = output_array # Predicted output values by the neural network. # Predicted_output array initially consists of zeroes. UpperCAmelCase__ : Union[str, Any] = numpy.zeros(output_array.shape) def snake_case__ ( self): UpperCAmelCase__ : List[str] = sigmoid( numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights)) # layer_between_first_hidden_layer_and_second_hidden_layer is the layer # connecting the first hidden set of nodes with the second hidden set of nodes. UpperCAmelCase__ : Any = sigmoid( numpy.dot( self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , )) # layer_between_second_hidden_layer_and_output is the layer connecting # second hidden layer with the output node. UpperCAmelCase__ : Tuple = sigmoid( numpy.dot( self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , )) return self.layer_between_second_hidden_layer_and_output def snake_case__ ( self): UpperCAmelCase__ : Optional[int] = numpy.dot( self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output) , ) UpperCAmelCase__ : str = numpy.dot( self.layer_between_input_and_first_hidden_layer.T , numpy.dot( 2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output) , self.second_hidden_layer_and_output_layer_weights.T , ) * sigmoid_derivative( self.layer_between_first_hidden_layer_and_second_hidden_layer) , ) UpperCAmelCase__ : Any = numpy.dot( self.input_array.T , numpy.dot( numpy.dot( 2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output) , self.second_hidden_layer_and_output_layer_weights.T , ) * sigmoid_derivative( self.layer_between_first_hidden_layer_and_second_hidden_layer) , self.first_hidden_layer_and_second_hidden_layer_weights.T , ) * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer) , ) self.input_layer_and_first_hidden_layer_weights += ( updated_input_layer_and_first_hidden_layer_weights ) self.first_hidden_layer_and_second_hidden_layer_weights += ( updated_first_hidden_layer_and_second_hidden_layer_weights ) self.second_hidden_layer_and_output_layer_weights += ( updated_second_hidden_layer_and_output_layer_weights ) def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase): for iteration in range(1 , iterations + 1): UpperCAmelCase__ : Optional[Any] = self.feedforward() self.back_propagation() if give_loss: UpperCAmelCase__ : str = numpy.mean(numpy.square(output - self.feedforward())) print(f'''Iteration {iteration} Loss: {loss}''') def snake_case__ ( self , _lowerCamelCase): UpperCAmelCase__ : List[Any] = input_arr UpperCAmelCase__ : Tuple = sigmoid( numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights)) UpperCAmelCase__ : List[Any] = sigmoid( numpy.dot( self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , )) UpperCAmelCase__ : Optional[Any] = sigmoid( numpy.dot( self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , )) return int(self.layer_between_second_hidden_layer_and_output > 0.6) def _UpperCamelCase ( UpperCamelCase__ ): return 1 / (1 + numpy.exp(-value )) def _UpperCamelCase ( UpperCamelCase__ ): return (value) * (1 - (value)) def _UpperCamelCase ( ): UpperCAmelCase__ : Union[str, Any] = numpy.array( ( [0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 1, 1], [1, 0, 0], [1, 0, 1], [1, 1, 0], [1, 1, 1], ) , dtype=numpy.floataa , ) # True output values for the given input values. UpperCAmelCase__ : str = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.floataa ) # Calling neural network class. UpperCAmelCase__ : List[Any] = TwoHiddenLayerNeuralNetwork( input_array=UpperCamelCase__ , output_array=UpperCamelCase__ ) # Calling training function. # Set give_loss to True if you want to see loss in every iteration. neural_network.train(output=UpperCamelCase__ , iterations=1_0 , give_loss=UpperCamelCase__ ) return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.floataa ) ) if __name__ == "__main__": example()
283
0
from __future__ import annotations import random import unittest from transformers import TransfoXLConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, TFTransfoXLForSequenceClassification, TFTransfoXLLMHeadModel, TFTransfoXLModel, ) class lowercase : def __init__( self ,A__ ,): lowercase = parent lowercase = 1_3 lowercase = 7 lowercase = 3_0 lowercase = self.seq_length + self.mem_len lowercase = 1_5 lowercase = True lowercase = True lowercase = 9_9 lowercase = [1_0, 5_0, 8_0] lowercase = 3_2 lowercase = 3_2 lowercase = 4 lowercase = 8 lowercase = 1_2_8 lowercase = 2 lowercase = 2 lowercase = None lowercase = 1 lowercase = 0 lowercase = 3 lowercase = self.vocab_size - 1 lowercase = 0.01 def A__ ( self): lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size) lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size) lowercase = None if self.use_labels: lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size) lowercase = TransfoXLConfig( vocab_size=self.vocab_size ,mem_len=self.mem_len ,clamp_len=self.clamp_len ,cutoffs=self.cutoffs ,d_model=self.hidden_size ,d_embed=self.d_embed ,n_head=self.num_attention_heads ,d_head=self.d_head ,d_inner=self.d_inner ,div_val=self.div_val ,n_layer=self.num_hidden_layers ,eos_token_id=self.eos_token_id ,pad_token_id=self.vocab_size - 1 ,init_range=self.init_range ,num_labels=self.num_labels ,) return (config, input_ids_a, input_ids_a, lm_labels) def A__ ( self): random.seed(self.seed) tf.random.set_seed(self.seed) def A__ ( self ,A__ ,A__ ,A__ ,A__): lowercase = TFTransfoXLModel(_lowerCamelCase) lowercase , lowercase = model(_lowerCamelCase).to_tuple() lowercase = {'''input_ids''': input_ids_a, '''mems''': mems_a} lowercase , lowercase = model(_lowerCamelCase).to_tuple() self.parent.assertEqual(hidden_states_a.shape ,(self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(hidden_states_a.shape ,(self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertListEqual( [mem.shape for mem in mems_a] ,[(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers ,) self.parent.assertListEqual( [mem.shape for mem in mems_a] ,[(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers ,) def A__ ( self ,A__ ,A__ ,A__ ,A__): lowercase = TFTransfoXLLMHeadModel(_lowerCamelCase) lowercase , lowercase = model(_lowerCamelCase).to_tuple() lowercase = {'''input_ids''': input_ids_a, '''labels''': lm_labels} lowercase , lowercase = model(_lowerCamelCase).to_tuple() lowercase , lowercase = model([input_ids_a, mems_a]).to_tuple() lowercase = {'''input_ids''': input_ids_a, '''mems''': mems_a, '''labels''': lm_labels} lowercase , lowercase = model(_lowerCamelCase).to_tuple() self.parent.assertEqual(lm_logits_a.shape ,(self.batch_size, self.seq_length, self.vocab_size)) self.parent.assertListEqual( [mem.shape for mem in mems_a] ,[(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers ,) self.parent.assertEqual(lm_logits_a.shape ,(self.batch_size, self.seq_length, self.vocab_size)) self.parent.assertListEqual( [mem.shape for mem in mems_a] ,[(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers ,) def A__ ( self ,A__ ,A__ ,A__ ,A__): lowercase = TFTransfoXLForSequenceClassification(_lowerCamelCase) lowercase = model(_lowerCamelCase) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels)) def A__ ( self): lowercase = self.prepare_config_and_inputs() ((lowercase) , (lowercase) , (lowercase) , (lowercase)) = config_and_inputs lowercase = {'''input_ids''': input_ids_a} return config, inputs_dict @require_tf class lowercase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ): lowercase_ : str =( (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else () ) lowercase_ : Optional[int] =() if is_tf_available() else () lowercase_ : Optional[Any] =( { "feature-extraction": TFTransfoXLModel, "text-classification": TFTransfoXLForSequenceClassification, "text-generation": TFTransfoXLLMHeadModel, "zero-shot": TFTransfoXLForSequenceClassification, } if is_tf_available() else {} ) # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented lowercase_ : int =False lowercase_ : Optional[int] =False lowercase_ : Dict =False lowercase_ : str =False def A__ ( self ,A__ ,A__ ,A__ ,A__ ,A__): if pipeline_test_casse_name == "TextGenerationPipelineTests": # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`. # `TransfoXLConfig` was never used in pipeline tests: cannot create a simple # tokenizer. return True return False def A__ ( self): lowercase = TFTransfoXLModelTester(self) lowercase = ConfigTester(self ,config_class=_lowerCamelCase ,d_embed=3_7) def A__ ( self): self.config_tester.run_common_tests() def A__ ( self): self.model_tester.set_seed() lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_model(*_lowerCamelCase) def A__ ( self): self.model_tester.set_seed() lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_lm_head(*_lowerCamelCase) def A__ ( self): lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*_lowerCamelCase) def A__ ( self): lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common() lowercase = [TFTransfoXLForSequenceClassification] for model_class in self.all_model_classes: lowercase = model_class(_lowerCamelCase) assert isinstance(model.get_input_embeddings() ,tf.keras.layers.Layer) if model_class in list_other_models_with_output_ebd: lowercase = model.get_output_embeddings() assert isinstance(_lowerCamelCase ,tf.keras.layers.Layer) lowercase = model.get_bias() assert name is None else: lowercase = model.get_output_embeddings() assert x is None lowercase = model.get_bias() assert name is None def A__ ( self): pass @slow def A__ ( self): for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase = TFTransfoXLModel.from_pretrained(_lowerCamelCase) self.assertIsNotNone(_lowerCamelCase) @unittest.skip(reason='''This model doesn\'t play well with fit() due to not returning a single loss.''') def A__ ( self): pass @require_tf class lowercase ( unittest.TestCase ): @unittest.skip('''Skip test until #12651 is resolved.''') @slow def A__ ( self): lowercase = TFTransfoXLLMHeadModel.from_pretrained('''transfo-xl-wt103''') # fmt: off lowercase = tf.convert_to_tensor([[3_3,1_2_9_7,2,1,1_0_0_9,4,1_1_0_9,1_1_7_3_9,4_7_6_2,3_5_8,5,2_5,2_4_5,2_2,1_7_0_6,1_7,2_0_0_9_8,5,3_2_1_5,2_1,3_7,1_1_1_0,3,1_3,1_0_4_1,4,2_4,6_0_3,4_9_0,2,7_1_4_7_7,2_0_0_9_8,1_0_4_4_4_7,2,2_0_9_6_1,1,2_6_0_4,4,1,3_2_9,3,6_2_2_4,8_3_1,1_6_0_0_2,2,8,6_0_3,7_8_9_6_7,2_9_5_4_6,2_3,8_0_3,2_0,2_5,4_1_6,5,8,2_3_2,4,2_7_7,6,1_8_5_5,4_6_0_1,3,2_9_5_4_6,5_4,8,3_6_0_9,5,5_7_2_1_1,4_9,4,1,2_7_7,1_8,8,1_7_5_5,1_5_6_9_1,3,3_4_1,2_5,4_1_6,6_9_3,4_2_5_7_3,7_1,1_7,4_0_1,9_4,3_1,1_7_9_1_9,2,2_9_5_4_6,7_8_7_3,1_8,1,4_3_5,2_3,1_1_0_1_1,7_5_5,5,5_1_6_7,3,7_9_8_3,9_8,8_4,2,2_9_5_4_6,3_2_6_7,8,3_6_0_9,4,1,4_8_6_5,1_0_7_5,2,6_0_8_7,7_1,6,3_4_6,8,5_8_5_4,3,2_9_5_4_6,8_2_4,1_4_0_0,1_8_6_8,2,1_9,1_6_0,2,3_1_1,8,5_4_9_6,2,2_0_9_2_0,1_7,2_5,1_5_0_9_7,3,2_4,2_4,0]] ,dtype=tf.intaa) # noqa: E231 # fmt: on # In 1991 , the remains of Russian Tsar Nicholas II and his family # ( except for Alexei and Maria ) are discovered . # The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the # remainder of the story . 1883 Western Siberia , # a young Grigori Rasputin is asked by his father and a group of men to perform magic . # Rasputin has a vision and denounces one of the men as a horse thief . Although his # father initially slaps him for making such an accusation , Rasputin watches as the # man is chased outside and beaten . Twenty years later , Rasputin sees a vision of # the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous , # with people , even a bishop , begging for his blessing . <eod> </s> <eos> # fmt: off lowercase = [3_3,1_2_9_7,2,1,1_0_0_9,4,1_1_0_9,1_1_7_3_9,4_7_6_2,3_5_8,5,2_5,2_4_5,2_2,1_7_0_6,1_7,2_0_0_9_8,5,3_2_1_5,2_1,3_7,1_1_1_0,3,1_3,1_0_4_1,4,2_4,6_0_3,4_9_0,2,7_1_4_7_7,2_0_0_9_8,1_0_4_4_4_7,2,2_0_9_6_1,1,2_6_0_4,4,1,3_2_9,3,6_2_2_4,8_3_1,1_6_0_0_2,2,8,6_0_3,7_8_9_6_7,2_9_5_4_6,2_3,8_0_3,2_0,2_5,4_1_6,5,8,2_3_2,4,2_7_7,6,1_8_5_5,4_6_0_1,3,2_9_5_4_6,5_4,8,3_6_0_9,5,5_7_2_1_1,4_9,4,1,2_7_7,1_8,8,1_7_5_5,1_5_6_9_1,3,3_4_1,2_5,4_1_6,6_9_3,4_2_5_7_3,7_1,1_7,4_0_1,9_4,3_1,1_7_9_1_9,2,2_9_5_4_6,7_8_7_3,1_8,1,4_3_5,2_3,1_1_0_1_1,7_5_5,5,5_1_6_7,3,7_9_8_3,9_8,8_4,2,2_9_5_4_6,3_2_6_7,8,3_6_0_9,4,1,4_8_6_5,1_0_7_5,2,6_0_8_7,7_1,6,3_4_6,8,5_8_5_4,3,2_9_5_4_6,8_2_4,1_4_0_0,1_8_6_8,2,1_9,1_6_0,2,3_1_1,8,5_4_9_6,2,2_0_9_2_0,1_7,2_5,1_5_0_9_7,3,2_4,2_4,0,3_3,1,1_8_5_7,2,1,1_0_0_9,4,1_1_0_9,1_1_7_3_9,4_7_6_2,3_5_8,5,2_5,2_4_5,2_8,1_1_1_0,3,1_3,1_0_4_1,4,2_4,6_0_3,4_9_0,2,7_1_4_7_7,2_0_0_9_8,1_0_4_4_4_7,2,2_0_9_6_1,1,2_6_0_4,4,1,3_2_9,3,0] # noqa: E231 # fmt: on # In 1991, the remains of Russian Tsar Nicholas II and his family ( # except for Alexei and Maria ) are discovered. The voice of young son, # Tsarevich Alexei Nikolaevich, narrates the remainder of the story. # 1883 Western Siberia, a young Grigori Rasputin is asked by his father # and a group of men to perform magic. Rasputin has a vision and # denounces one of the men as a horse thief. Although his father initially # slaps him for making such an accusation, Rasputin watches as the man # is chased outside and beaten. Twenty years later, Rasputin sees a vision # of the Virgin Mary, prompting him to become a priest. # Rasputin quickly becomes famous, with people, even a bishop, begging for # his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar # Nicholas II and his family were discovered. The voice of <unk> young son, # Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos> lowercase = model.generate(_lowerCamelCase ,max_length=2_0_0 ,do_sample=_lowerCamelCase) self.assertListEqual(output_ids[0].numpy().tolist() ,_lowerCamelCase)
101
"""simple docstring""" import numpy as np import torch from torch.utils.data import DataLoader from accelerate.utils.dataclasses import DistributedType class snake_case : '''simple docstring''' def __init__( self : Optional[int], _lowerCamelCase : Optional[int]=2, _lowerCamelCase : Optional[int]=3, _lowerCamelCase : int=64, _lowerCamelCase : List[str]=None ): '''simple docstring''' __A = np.random.default_rng(_lowerCamelCase ) __A = length __A = rng.normal(size=(length,) ).astype(np.floataa ) __A = a * self.x + b + rng.normal(scale=0.1, size=(length,) ).astype(np.floataa ) def __len__( self : str ): '''simple docstring''' return self.length def __getitem__( self : Dict, _lowerCamelCase : Optional[int] ): '''simple docstring''' return {"x": self.x[i], "y": self.y[i]} class snake_case ( torch.nn.Module ): '''simple docstring''' def __init__( self : Optional[Any], _lowerCamelCase : Tuple=0, _lowerCamelCase : Any=0, _lowerCamelCase : Optional[Any]=False ): '''simple docstring''' super().__init__() __A = torch.nn.Parameter(torch.tensor([2, 3] ).float() ) __A = torch.nn.Parameter(torch.tensor([2, 3] ).float() ) __A = True def _SCREAMING_SNAKE_CASE ( self : List[str], _lowerCamelCase : Optional[Any]=None ): '''simple docstring''' if self.first_batch: print(f'Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}' ) __A = False return x * self.a[0] + self.b[0] class snake_case ( torch.nn.Module ): '''simple docstring''' def __init__( self : str, _lowerCamelCase : Optional[Any]=0, _lowerCamelCase : Any=0, _lowerCamelCase : List[Any]=False ): '''simple docstring''' super().__init__() __A = torch.nn.Parameter(torch.tensor(_lowerCamelCase ).float() ) __A = torch.nn.Parameter(torch.tensor(_lowerCamelCase ).float() ) __A = True def _SCREAMING_SNAKE_CASE ( self : Optional[Any], _lowerCamelCase : List[str]=None ): '''simple docstring''' if self.first_batch: print(f'Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}' ) __A = False return x * self.a + self.b def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase = 1_6 ): """simple docstring""" from datasets import load_dataset from transformers import AutoTokenizer __A = AutoTokenizer.from_pretrained('''bert-base-cased''' ) __A = {'''train''': '''tests/test_samples/MRPC/train.csv''', '''validation''': '''tests/test_samples/MRPC/dev.csv'''} __A = load_dataset('''csv''' , data_files=__UpperCamelCase ) __A = datasets['''train'''].unique('''label''' ) __A = {v: i for i, v in enumerate(__UpperCamelCase )} def tokenize_function(__UpperCamelCase ): # max_length=None => use the model max length (it's actually the default) __A = tokenizer( examples['''sentence1'''] , examples['''sentence2'''] , truncation=__UpperCamelCase , max_length=__UpperCamelCase , padding='''max_length''' ) if "label" in examples: __A = [label_to_id[l] for l in examples['''label''']] return outputs # Apply the method we just defined to all the examples in all the splits of the dataset __A = datasets.map( __UpperCamelCase , batched=__UpperCamelCase , remove_columns=['''sentence1''', '''sentence2''', '''label'''] , ) def collate_fn(__UpperCamelCase ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(__UpperCamelCase , padding='''max_length''' , max_length=1_2_8 , return_tensors='''pt''' ) return tokenizer.pad(__UpperCamelCase , padding='''longest''' , return_tensors='''pt''' ) # Instantiate dataloaders. __A = DataLoader(tokenized_datasets['''train'''] , shuffle=__UpperCamelCase , collate_fn=__UpperCamelCase , batch_size=2 ) __A = DataLoader(tokenized_datasets['''validation'''] , shuffle=__UpperCamelCase , collate_fn=__UpperCamelCase , batch_size=1 ) return train_dataloader, eval_dataloader
266
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import _LazyModule __UpperCAmelCase = {"tokenization_bertweet": ["BertweetTokenizer"]} if TYPE_CHECKING: from .tokenization_bertweet import BertweetTokenizer else: import sys __UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
366
'''simple docstring''' from sklearn.metrics import fa_score, matthews_corrcoef import datasets from .record_evaluation import evaluate as evaluate_record __UpperCAmelCase = '''\ @article{wang2019superglue, title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems}, author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R}, journal={arXiv preprint arXiv:1905.00537}, year={2019} } ''' __UpperCAmelCase = '''\ SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after GLUE with a new set of more difficult language understanding tasks, improved resources, and a new public leaderboard. ''' __UpperCAmelCase = ''' Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset. Args: predictions: list of predictions to score. Depending on the SuperGlUE subset: - for \'record\': list of question-answer dictionaries with the following keys: - \'idx\': index of the question as specified by the dataset - \'prediction_text\': the predicted answer text - for \'multirc\': list of question-answer dictionaries with the following keys: - \'idx\': index of the question-answer pair as specified by the dataset - \'prediction\': the predicted answer label - otherwise: list of predicted labels references: list of reference labels. Depending on the SuperGLUE subset: - for \'record\': list of question-answers dictionaries with the following keys: - \'idx\': index of the question as specified by the dataset - \'answers\': list of possible answers - otherwise: list of reference labels Returns: depending on the SuperGLUE subset: - for \'record\': - \'exact_match\': Exact match between answer and gold answer - \'f1\': F1 score - for \'multirc\': - \'exact_match\': Exact match between answer and gold answer - \'f1_m\': Per-question macro-F1 score - \'f1_a\': Average F1 score over all answers - for \'axb\': \'matthews_correlation\': Matthew Correlation - for \'cb\': - \'accuracy\': Accuracy - \'f1\': F1 score - for all others: - \'accuracy\': Accuracy Examples: >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"] >>> predictions = [0, 1] >>> references = [0, 1] >>> results = super_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {\'accuracy\': 1.0} >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\') >>> predictions = [0, 1] >>> references = [0, 1] >>> results = super_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {\'accuracy\': 1.0, \'f1\': 1.0} >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\') >>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}] >>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}] >>> results = super_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {\'exact_match\': 1.0, \'f1\': 1.0} >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\') >>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}] >>> references = [0, 1] >>> results = super_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0} >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'axb\') >>> references = [0, 1] >>> predictions = [0, 1] >>> results = super_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {\'matthews_correlation\': 1.0} ''' def _snake_case ( A , A ) -> List[Any]: return float((preds == labels).mean() ) def _snake_case ( A , A , A="binary" ) -> int: lowerCAmelCase__ = simple_accuracy(A , A ) lowerCAmelCase__ = float(fa_score(y_true=A , y_pred=A , average=A ) ) return { "accuracy": acc, "f1": fa, } def _snake_case ( A , A ) -> List[Any]: lowerCAmelCase__ = {} for id_pred, label in zip(A , A ): lowerCAmelCase__ = F"""{id_pred["idx"]["paragraph"]}-{id_pred["idx"]["question"]}""" lowerCAmelCase__ = id_pred['''prediction'''] if question_id in question_map: question_map[question_id].append((pred, label) ) else: lowerCAmelCase__ = [(pred, label)] lowerCAmelCase__ , lowerCAmelCase__ = [], [] for question, preds_labels in question_map.items(): lowerCAmelCase__ , lowerCAmelCase__ = zip(*A ) lowerCAmelCase__ = fa_score(y_true=A , y_pred=A , average='''macro''' ) fas.append(A ) lowerCAmelCase__ = int(sum(pred == label for pred, label in preds_labels ) == len(A ) ) ems.append(A ) lowerCAmelCase__ = float(sum(A ) / len(A ) ) lowerCAmelCase__ = sum(A ) / len(A ) lowerCAmelCase__ = float(fa_score(y_true=A , y_pred=[id_pred['''prediction'''] for id_pred in ids_preds] ) ) return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a} @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class a__ ( datasets.Metric ): '''simple docstring''' def __SCREAMING_SNAKE_CASE ( self ) -> Dict: if self.config_name not in [ "boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg", ]: raise KeyError( '''You should supply a configuration name selected in ''' '''["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]''' ) return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' if not self.config_name == '''record''' and not self.config_name == '''multirc''' else None , ) def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]: if self.config_name == "record": return { "predictions": { "idx": { "passage": datasets.Value('''int64''' ), "query": datasets.Value('''int64''' ), }, "prediction_text": datasets.Value('''string''' ), }, "references": { "idx": { "passage": datasets.Value('''int64''' ), "query": datasets.Value('''int64''' ), }, "answers": datasets.Sequence(datasets.Value('''string''' ) ), }, } elif self.config_name == "multirc": return { "predictions": { "idx": { "answer": datasets.Value('''int64''' ), "paragraph": datasets.Value('''int64''' ), "question": datasets.Value('''int64''' ), }, "prediction": datasets.Value('''int64''' ), }, "references": datasets.Value('''int64''' ), } else: return { "predictions": datasets.Value('''int64''' ), "references": datasets.Value('''int64''' ), } def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ ) -> Dict: if self.config_name == "axb": return {"matthews_correlation": matthews_corrcoef(lowerCamelCase_ , lowerCamelCase_ )} elif self.config_name == "cb": return acc_and_fa(lowerCamelCase_ , lowerCamelCase_ , fa_avg='''macro''' ) elif self.config_name == "record": lowerCAmelCase__ = [ { '''qas''': [ {'''id''': ref['''idx''']['''query'''], '''answers''': [{'''text''': ans} for ans in ref['''answers''']]} for ref in references ] } ] lowerCAmelCase__ = {pred['''idx''']['''query''']: pred['''prediction_text'''] for pred in predictions} return evaluate_record(lowerCamelCase_ , lowerCamelCase_ )[0] elif self.config_name == "multirc": return evaluate_multirc(lowerCamelCase_ , lowerCamelCase_ ) elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]: return {"accuracy": simple_accuracy(lowerCamelCase_ , lowerCamelCase_ )} else: raise KeyError( '''You should supply a configuration name selected in ''' '''["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]''' )
228
0
import random import unittest import torch from diffusers import IFInpaintingSuperResolutionPipeline from diffusers.utils import floats_tensor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import skip_mps, torch_device from ..pipeline_params import ( TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin from . import IFPipelineTesterMixin @skip_mps class __A ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase ): '''simple docstring''' lowerCAmelCase_ = IFInpaintingSuperResolutionPipeline lowerCAmelCase_ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"""width""", """height"""} lowerCAmelCase_ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"""original_image"""} ) lowerCAmelCase_ = PipelineTesterMixin.required_optional_params - {"""latents"""} def __lowerCamelCase ( self ): '''simple docstring''' return self._get_superresolution_dummy_components() def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase=0 ): '''simple docstring''' if str(__lowerCAmelCase ).startswith('''mps''' ): lowerCamelCase__ = torch.manual_seed(__lowerCAmelCase ) else: lowerCamelCase__ = torch.Generator(device=__lowerCAmelCase ).manual_seed(__lowerCAmelCase ) lowerCamelCase__ = floats_tensor((1, 3, 1_6, 1_6) , rng=random.Random(__lowerCAmelCase ) ).to(__lowerCAmelCase ) lowerCamelCase__ = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(__lowerCAmelCase ) ).to(__lowerCAmelCase ) lowerCamelCase__ = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(__lowerCAmelCase ) ).to(__lowerCAmelCase ) lowerCamelCase__ = { '''prompt''': '''A painting of a squirrel eating a burger''', '''image''': image, '''original_image''': original_image, '''mask_image''': mask_image, '''generator''': generator, '''num_inference_steps''': 2, '''output_type''': '''numpy''', } return inputs @unittest.skipIf( torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , ) def __lowerCamelCase ( self ): '''simple docstring''' self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) def __lowerCamelCase ( self ): '''simple docstring''' self._test_save_load_optional_components() @unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' ) def __lowerCamelCase ( self ): '''simple docstring''' super().test_save_load_floataa(expected_max_diff=1E-1 ) def __lowerCamelCase ( self ): '''simple docstring''' self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 ) def __lowerCamelCase ( self ): '''simple docstring''' self._test_save_load_local() def __lowerCamelCase ( self ): '''simple docstring''' self._test_inference_batch_single_identical( expected_max_diff=1E-2 , )
209
import pytest from datasets.parallel import ParallelBackendConfig, parallel_backend from datasets.utils.py_utils import map_nested from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows def lowerCAmelCase__(__snake_case ) -> int: # picklable for multiprocessing '''simple docstring''' return i + 1 @require_dill_gt_0_3_2 @require_joblibspark @require_not_windows def lowerCAmelCase__() -> Any: '''simple docstring''' with parallel_backend('''spark''' ): assert ParallelBackendConfig.backend_name == "spark" lowerCamelCase__ = [1, 2, 3] with pytest.raises(__snake_case ): with parallel_backend('''unsupported backend''' ): map_nested(__snake_case ,__snake_case ,num_proc=2 ) with pytest.raises(__snake_case ): with parallel_backend('''unsupported backend''' ): map_nested(__snake_case ,__snake_case ,num_proc=-1 ) @require_dill_gt_0_3_2 @require_joblibspark @require_not_windows @pytest.mark.parametrize('''num_proc''' ,[2, -1] ) def lowerCAmelCase__(__snake_case ) -> Tuple: '''simple docstring''' lowerCamelCase__ = [1, 2] lowerCamelCase__ = {'''a''': 1, '''b''': 2} lowerCamelCase__ = {'''a''': [1, 2], '''b''': [3, 4]} lowerCamelCase__ = {'''a''': {'''1''': 1}, '''b''': 2} lowerCamelCase__ = {'''a''': 1, '''b''': 2, '''c''': 3, '''d''': 4} lowerCamelCase__ = [2, 3] lowerCamelCase__ = {'''a''': 2, '''b''': 3} lowerCamelCase__ = {'''a''': [2, 3], '''b''': [4, 5]} lowerCamelCase__ = {'''a''': {'''1''': 2}, '''b''': 3} lowerCamelCase__ = {'''a''': 2, '''b''': 3, '''c''': 4, '''d''': 5} with parallel_backend('''spark''' ): assert map_nested(__snake_case ,__snake_case ,num_proc=__snake_case ) == expected_map_nested_sa assert map_nested(__snake_case ,__snake_case ,num_proc=__snake_case ) == expected_map_nested_sa assert map_nested(__snake_case ,__snake_case ,num_proc=__snake_case ) == expected_map_nested_sa assert map_nested(__snake_case ,__snake_case ,num_proc=__snake_case ) == expected_map_nested_sa assert map_nested(__snake_case ,__snake_case ,num_proc=__snake_case ) == expected_map_nested_sa
209
1
'''simple docstring''' import json import os import shutil import tempfile from unittest import TestCase from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast from transformers.models.bart.configuration_bart import BartConfig from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES from transformers.models.dpr.configuration_dpr import DPRConfig from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available if is_torch_available() and is_datasets_available() and is_faiss_available(): from transformers.models.rag.configuration_rag import RagConfig from transformers.models.rag.tokenization_rag import RagTokenizer @require_faiss @require_torch class __UpperCamelCase ( lowercase__ ): def a__ ( self :str ): snake_case_ : Any = tempfile.mkdtemp() snake_case_ : Tuple = 8 # DPR tok snake_case_ : Optional[int] = [ """[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing""", """,""", """low""", """lowest""", ] snake_case_ : List[str] = os.path.join(self.tmpdirname ,"""dpr_tokenizer""" ) os.makedirs(_UpperCamelCase ,exist_ok=_UpperCamelCase ) snake_case_ : List[Any] = os.path.join(_UpperCamelCase ,DPR_VOCAB_FILES_NAMES["""vocab_file"""] ) with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) ) # BART tok snake_case_ : Dict = [ """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """\u0120""", """\u0120l""", """\u0120n""", """\u0120lo""", """\u0120low""", """er""", """\u0120lowest""", """\u0120newer""", """\u0120wider""", """<unk>""", ] snake_case_ : Tuple = dict(zip(_UpperCamelCase ,range(len(_UpperCamelCase ) ) ) ) snake_case_ : Optional[Any] = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""] snake_case_ : Union[str, Any] = {"""unk_token""": """<unk>"""} snake_case_ : List[Any] = os.path.join(self.tmpdirname ,"""bart_tokenizer""" ) os.makedirs(_UpperCamelCase ,exist_ok=_UpperCamelCase ) snake_case_ : List[str] = os.path.join(_UpperCamelCase ,BART_VOCAB_FILES_NAMES["""vocab_file"""] ) snake_case_ : Tuple = os.path.join(_UpperCamelCase ,BART_VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as fp: fp.write(json.dumps(_UpperCamelCase ) + """\n""" ) with open(self.merges_file ,"""w""" ,encoding="""utf-8""" ) as fp: fp.write("""\n""".join(_UpperCamelCase ) ) def a__ ( self :Tuple ): return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname ,"""dpr_tokenizer""" ) ) def a__ ( self :Dict ): return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname ,"""bart_tokenizer""" ) ) def a__ ( self :Optional[Any] ): shutil.rmtree(self.tmpdirname ) @require_tokenizers def a__ ( self :str ): snake_case_ : Tuple = os.path.join(self.tmpdirname ,"""rag_tokenizer""" ) snake_case_ : Optional[int] = RagConfig(question_encoder=DPRConfig().to_dict() ,generator=BartConfig().to_dict() ) snake_case_ : List[Any] = RagTokenizer(question_encoder=self.get_dpr_tokenizer() ,generator=self.get_bart_tokenizer() ) rag_config.save_pretrained(_UpperCamelCase ) rag_tokenizer.save_pretrained(_UpperCamelCase ) snake_case_ : Union[str, Any] = RagTokenizer.from_pretrained(_UpperCamelCase ,config=_UpperCamelCase ) self.assertIsInstance(new_rag_tokenizer.question_encoder ,_UpperCamelCase ) self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab() ,rag_tokenizer.question_encoder.get_vocab() ) self.assertIsInstance(new_rag_tokenizer.generator ,_UpperCamelCase ) self.assertEqual(new_rag_tokenizer.generator.get_vocab() ,rag_tokenizer.generator.get_vocab() ) @slow def a__ ( self :Any ): snake_case_ : List[Any] = RagTokenizer.from_pretrained("""facebook/rag-token-nq""" ) snake_case_ : Union[str, Any] = [ """who got the first nobel prize in physics""", """when is the next deadpool movie being released""", """which mode is used for short wave broadcast service""", """who is the owner of reading football club""", """when is the next scandal episode coming out""", """when is the last time the philadelphia won the superbowl""", """what is the most current adobe flash player version""", """how many episodes are there in dragon ball z""", """what is the first step in the evolution of the eye""", """where is gall bladder situated in human body""", """what is the main mineral in lithium batteries""", """who is the president of usa right now""", """where do the greasers live in the outsiders""", """panda is a national animal of which country""", """what is the name of manchester united stadium""", ] snake_case_ : Tuple = tokenizer(_UpperCamelCase ) self.assertIsNotNone(_UpperCamelCase ) @slow def a__ ( self :Dict ): snake_case_ : Union[str, Any] = RagTokenizer.from_pretrained("""facebook/rag-sequence-nq""" ) snake_case_ : Optional[Any] = [ """who got the first nobel prize in physics""", """when is the next deadpool movie being released""", """which mode is used for short wave broadcast service""", """who is the owner of reading football club""", """when is the next scandal episode coming out""", """when is the last time the philadelphia won the superbowl""", """what is the most current adobe flash player version""", """how many episodes are there in dragon ball z""", """what is the first step in the evolution of the eye""", """where is gall bladder situated in human body""", """what is the main mineral in lithium batteries""", """who is the president of usa right now""", """where do the greasers live in the outsiders""", """panda is a national animal of which country""", """what is the name of manchester united stadium""", ] snake_case_ : str = tokenizer(_UpperCamelCase ) self.assertIsNotNone(_UpperCamelCase )
363
'''simple docstring''' from __future__ import annotations import unittest import numpy as np from transformers import BlipTextConfig from transformers.testing_utils import require_tf, slow from transformers.utils import is_tf_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask if is_tf_available(): import tensorflow as tf from transformers import TFBlipTextModel from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST class __UpperCamelCase : def __init__( self :Any ,_UpperCamelCase :Optional[Any] ,_UpperCamelCase :Optional[int]=1_2 ,_UpperCamelCase :Optional[Any]=7 ,_UpperCamelCase :Optional[int]=True ,_UpperCamelCase :Union[str, Any]=True ,_UpperCamelCase :Dict=True ,_UpperCamelCase :Optional[int]=9_9 ,_UpperCamelCase :Dict=3_2 ,_UpperCamelCase :Union[str, Any]=3_2 ,_UpperCamelCase :Union[str, Any]=2 ,_UpperCamelCase :Optional[Any]=4 ,_UpperCamelCase :List[Any]=3_7 ,_UpperCamelCase :Tuple=0.1 ,_UpperCamelCase :Optional[int]=0.1 ,_UpperCamelCase :int=5_1_2 ,_UpperCamelCase :Tuple=0.02 ,_UpperCamelCase :Any=0 ,_UpperCamelCase :str=None ,): snake_case_ : str = parent snake_case_ : int = batch_size snake_case_ : Union[str, Any] = seq_length snake_case_ : List[Any] = is_training snake_case_ : Union[str, Any] = use_input_mask snake_case_ : List[str] = use_labels snake_case_ : int = vocab_size snake_case_ : Any = hidden_size snake_case_ : List[Any] = projection_dim snake_case_ : Dict = num_hidden_layers snake_case_ : Dict = num_attention_heads snake_case_ : str = intermediate_size snake_case_ : int = dropout snake_case_ : int = attention_dropout snake_case_ : Dict = max_position_embeddings snake_case_ : Union[str, Any] = initializer_range snake_case_ : Dict = scope snake_case_ : Union[str, Any] = bos_token_id def a__ ( self :Any ): snake_case_ : Any = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) snake_case_ : Union[str, Any] = None if self.use_input_mask: snake_case_ : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] ) if input_mask is not None: snake_case_ : int = input_mask.numpy() snake_case_ , snake_case_ : Tuple = input_mask.shape snake_case_ : Any = np.random.randint(1 ,seq_length - 1 ,size=(batch_size,) ) for batch_idx, start_index in enumerate(_UpperCamelCase ): snake_case_ : Optional[int] = 1 snake_case_ : List[str] = 0 snake_case_ : Tuple = self.get_config() return config, input_ids, tf.convert_to_tensor(_UpperCamelCase ) def a__ ( self :str ): return BlipTextConfig( vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,projection_dim=self.projection_dim ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,dropout=self.dropout ,attention_dropout=self.attention_dropout ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,bos_token_id=self.bos_token_id ,) def a__ ( self :List[Any] ,_UpperCamelCase :Union[str, Any] ,_UpperCamelCase :Tuple ,_UpperCamelCase :Optional[int] ): snake_case_ : List[str] = TFBlipTextModel(config=_UpperCamelCase ) snake_case_ : List[Any] = model(_UpperCamelCase ,attention_mask=_UpperCamelCase ,training=_UpperCamelCase ) snake_case_ : Any = model(_UpperCamelCase ,training=_UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) ) def a__ ( self :List[str] ): snake_case_ : Union[str, Any] = self.prepare_config_and_inputs() snake_case_ , snake_case_ , snake_case_ : str = config_and_inputs snake_case_ : Dict = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_tf class __UpperCamelCase ( lowercase__ , unittest.TestCase ): lowercase : Optional[Any] = (TFBlipTextModel,) if is_tf_available() else () lowercase : int = False lowercase : List[Any] = False lowercase : Dict = False def a__ ( self :List[Any] ): snake_case_ : List[str] = BlipTextModelTester(self ) snake_case_ : Tuple = ConfigTester(self ,config_class=_UpperCamelCase ,hidden_size=3_7 ) def a__ ( self :Union[str, Any] ): self.config_tester.run_common_tests() def a__ ( self :Union[str, Any] ): snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_UpperCamelCase ) def a__ ( self :Tuple ): pass def a__ ( self :Tuple ): pass @unittest.skip(reason="""Blip does not use inputs_embeds""" ) def a__ ( self :Any ): pass @unittest.skip(reason="""BlipTextModel has no base class and is not available in MODEL_MAPPING""" ) def a__ ( self :Tuple ): pass @unittest.skip(reason="""BlipTextModel has no base class and is not available in MODEL_MAPPING""" ) def a__ ( self :List[Any] ): pass @slow def a__ ( self :Any ): for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: snake_case_ : Optional[Any] = TFBlipTextModel.from_pretrained(_UpperCamelCase ) self.assertIsNotNone(_UpperCamelCase ) def a__ ( self :Dict ,_UpperCamelCase :Tuple=True ): super().test_pt_tf_model_equivalence(allow_missing_keys=_UpperCamelCase )
8
0
import torch from diffusers import DDIMParallelScheduler from .test_schedulers import SchedulerCommonTest class A__ ( __snake_case ): _UpperCAmelCase :List[Any] = (DDIMParallelScheduler,) _UpperCAmelCase :Any = (('eta', 0.0), ('num_inference_steps', 5_0)) def __UpperCamelCase( self , **A_ ): '''simple docstring''' UpperCamelCase : List[str] = { "num_train_timesteps": 1000, "beta_start": 0.00_01, "beta_end": 0.02, "beta_schedule": "linear", "clip_sample": True, } config.update(**A_ ) return config def __UpperCamelCase( self , **A_ ): '''simple docstring''' UpperCamelCase : Tuple = self.scheduler_classes[0] UpperCamelCase : List[Any] = self.get_scheduler_config(**A_ ) UpperCamelCase : Dict = scheduler_class(**A_ ) UpperCamelCase , UpperCamelCase : Tuple = 10, 0.0 UpperCamelCase : List[Any] = self.dummy_model() UpperCamelCase : Union[str, Any] = self.dummy_sample_deter scheduler.set_timesteps(A_ ) for t in scheduler.timesteps: UpperCamelCase : Any = model(A_ , A_ ) UpperCamelCase : Any = scheduler.step(A_ , A_ , A_ , A_ ).prev_sample return sample def __UpperCamelCase( self ): '''simple docstring''' for timesteps in [100, 500, 1000]: self.check_over_configs(num_train_timesteps=A_ ) def __UpperCamelCase( self ): '''simple docstring''' for steps_offset in [0, 1]: self.check_over_configs(steps_offset=A_ ) UpperCamelCase : Optional[Any] = self.scheduler_classes[0] UpperCamelCase : Dict = self.get_scheduler_config(steps_offset=1 ) UpperCamelCase : List[str] = scheduler_class(**A_ ) scheduler.set_timesteps(5 ) assert torch.equal(scheduler.timesteps , torch.LongTensor([801, 601, 401, 201, 1] ) ) def __UpperCamelCase( self ): '''simple docstring''' for beta_start, beta_end in zip([0.00_01, 0.0_01, 0.01, 0.1] , [0.0_02, 0.02, 0.2, 2] ): self.check_over_configs(beta_start=A_ , beta_end=A_ ) def __UpperCamelCase( self ): '''simple docstring''' for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=A_ ) def __UpperCamelCase( self ): '''simple docstring''' for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=A_ ) def __UpperCamelCase( self ): '''simple docstring''' for clip_sample in [True, False]: self.check_over_configs(clip_sample=A_ ) def __UpperCamelCase( self ): '''simple docstring''' for timestep_spacing in ["trailing", "leading"]: self.check_over_configs(timestep_spacing=A_ ) def __UpperCamelCase( self ): '''simple docstring''' for rescale_betas_zero_snr in [True, False]: self.check_over_configs(rescale_betas_zero_snr=A_ ) def __UpperCamelCase( self ): '''simple docstring''' self.check_over_configs(thresholding=A_ ) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs( thresholding=A_ , prediction_type=A_ , sample_max_value=A_ , ) def __UpperCamelCase( self ): '''simple docstring''' for t in [1, 10, 49]: self.check_over_forward(time_step=A_ ) def __UpperCamelCase( self ): '''simple docstring''' for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 500] ): self.check_over_forward(time_step=A_ , num_inference_steps=A_ ) def __UpperCamelCase( self ): '''simple docstring''' for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0] ): self.check_over_forward(time_step=A_ , eta=A_ ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Any = self.scheduler_classes[0] UpperCamelCase : Union[str, Any] = self.get_scheduler_config() UpperCamelCase : List[Any] = scheduler_class(**A_ ) assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(420 , 400 ) - 0.1_47_71 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(980 , 960 ) - 0.3_24_60 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(487 , 486 ) - 0.0_09_79 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(999 , 998 ) - 0.02 ) ) < 1e-5 def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : List[str] = self.scheduler_classes[0] UpperCamelCase : Any = self.get_scheduler_config() UpperCamelCase : List[Any] = scheduler_class(**A_ ) UpperCamelCase , UpperCamelCase : Tuple = 10, 0.0 scheduler.set_timesteps(A_ ) UpperCamelCase : Tuple = self.dummy_model() UpperCamelCase : List[str] = self.dummy_sample_deter UpperCamelCase : Optional[int] = self.dummy_sample_deter + 0.1 UpperCamelCase : Optional[int] = self.dummy_sample_deter - 0.1 UpperCamelCase : Optional[Any] = samplea.shape[0] UpperCamelCase : Dict = torch.stack([samplea, samplea, samplea] , dim=0 ) UpperCamelCase : int = torch.arange(A_ )[0:3, None].repeat(1 , A_ ) UpperCamelCase : Tuple = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) ) UpperCamelCase : Optional[int] = scheduler.batch_step_no_noise(A_ , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , A_ ) UpperCamelCase : Optional[Any] = torch.sum(torch.abs(A_ ) ) UpperCamelCase : Any = torch.mean(torch.abs(A_ ) ) assert abs(result_sum.item() - 11_47.79_04 ) < 1e-2 assert abs(result_mean.item() - 0.49_82 ) < 1e-3 def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : int = self.full_loop() UpperCamelCase : Dict = torch.sum(torch.abs(A_ ) ) UpperCamelCase : Dict = torch.mean(torch.abs(A_ ) ) assert abs(result_sum.item() - 1_72.00_67 ) < 1e-2 assert abs(result_mean.item() - 0.22_39_67 ) < 1e-3 def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : List[Any] = self.full_loop(prediction_type="v_prediction" ) UpperCamelCase : List[str] = torch.sum(torch.abs(A_ ) ) UpperCamelCase : Optional[int] = torch.mean(torch.abs(A_ ) ) assert abs(result_sum.item() - 52.53_02 ) < 1e-2 assert abs(result_mean.item() - 0.06_84 ) < 1e-3 def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : int = self.full_loop(set_alpha_to_one=A_ , beta_start=0.01 ) UpperCamelCase : Dict = torch.sum(torch.abs(A_ ) ) UpperCamelCase : Union[str, Any] = torch.mean(torch.abs(A_ ) ) assert abs(result_sum.item() - 1_49.82_95 ) < 1e-2 assert abs(result_mean.item() - 0.19_51 ) < 1e-3 def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : int = self.full_loop(set_alpha_to_one=A_ , beta_start=0.01 ) UpperCamelCase : Union[str, Any] = torch.sum(torch.abs(A_ ) ) UpperCamelCase : Dict = torch.mean(torch.abs(A_ ) ) assert abs(result_sum.item() - 1_49.07_84 ) < 1e-2 assert abs(result_mean.item() - 0.19_41 ) < 1e-3
52
from __future__ import annotations from typing import Any def __snake_case ( _UpperCAmelCase ): if not postfix_notation: return 0 __a = {'''+''', '''-''', '''*''', '''/'''} __a = [] for token in postfix_notation: if token in operations: __a , __a = stack.pop(), stack.pop() if token == "+": stack.append(a + b ) elif token == "-": stack.append(a - b ) elif token == "*": stack.append(a * b ) else: if a * b < 0 and a % b != 0: stack.append(a // b + 1 ) else: stack.append(a // b ) else: stack.append(int(_UpperCAmelCase ) ) return stack.pop() if __name__ == "__main__": import doctest doctest.testmod()
49
0
"""simple docstring""" from __future__ import annotations from math import gcd def snake_case_ ( A_ : int, A_ : int = 2, A_ : int = 1, A_ : int = 3, ): '''simple docstring''' if num < 2: raise ValueError('''The input value cannot be less than 2''' ) # Because of the relationship between ``f(f(x))`` and ``f(x)``, this # algorithm struggles to find factors that are divisible by two. # As a workaround, we specifically check for two and even inputs. # See: https://math.stackexchange.com/a/2856214/165820 if num > 2 and num % 2 == 0: return 2 # Pollard's Rho algorithm requires a function that returns pseudorandom # values between 0 <= X < ``num``. It doesn't need to be random in the # sense that the output value is cryptographically secure or difficult # to calculate, it only needs to be random in the sense that all output # values should be equally likely to appear. # For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num`` # However, the success of Pollard's algorithm isn't guaranteed and is # determined in part by the initial seed and the chosen random function. # To make retries easier, we will instead use ``f(x) = (x**2 + C) % num`` # where ``C`` is a value that we can modify between each attempt. def rand_fn(A_ : int, A_ : int, A_ : int ) -> int: return (pow(A_, 2 ) + step) % modulus for _ in range(A_ ): # These track the position within the cycle detection logic. _lowerCamelCase : Dict = seed _lowerCamelCase : Dict = seed while True: # At each iteration, the tortoise moves one step and the hare moves two. _lowerCamelCase : Union[str, Any] = rand_fn(A_, A_, A_ ) _lowerCamelCase : Optional[int] = rand_fn(A_, A_, A_ ) _lowerCamelCase : str = rand_fn(A_, A_, A_ ) # At some point both the tortoise and the hare will enter a cycle whose # length ``p`` is a divisor of ``num``. Once in that cycle, at some point # the tortoise and hare will end up on the same value modulo ``p``. # We can detect when this happens because the position difference between # the tortoise and the hare will share a common divisor with ``num``. _lowerCamelCase : Optional[Any] = gcd(hare - tortoise, A_ ) if divisor == 1: # No common divisor yet, just keep searching. continue else: # We found a common divisor! if divisor == num: # Unfortunately, the divisor is ``num`` itself and is useless. break else: # The divisor is a nontrivial factor of ``num``! return divisor # If we made it here, then this attempt failed. # We need to pick a new starting seed for the tortoise and hare # in addition to a new step value for the random function. # To keep this example implementation deterministic, the # new values will be generated based on currently available # values instead of using something like ``random.randint``. # We can use the hare's position as the new seed. # This is actually what Richard Brent's the "optimized" variant does. _lowerCamelCase : List[str] = hare # The new step value for the random function can just be incremented. # At first the results will be similar to what the old function would # have produced, but the value will quickly diverge after a bit. step += 1 # We haven't found a divisor within the requested number of attempts. # We were unlucky or ``num`` itself is actually prime. return None if __name__ == "__main__": import argparse lowerCAmelCase__ = argparse.ArgumentParser() parser.add_argument( '''num''', type=int, help='''The value to find a divisor of''', ) parser.add_argument( '''--attempts''', type=int, default=3, help='''The number of attempts before giving up''', ) lowerCAmelCase__ = parser.parse_args() lowerCAmelCase__ = pollard_rho(args.num, attempts=args.attempts) if divisor is None: print(F"""{args.num} is probably prime""") else: lowerCAmelCase__ = args.num // divisor print(F"""{args.num} = {divisor} * {quotient}""")
175
"""simple docstring""" def snake_case_ ( A_ : list ): '''simple docstring''' _lowerCamelCase : Union[str, Any] = len(A_ ) for i in range(1, A_ ): _lowerCamelCase : Tuple = collection[i] _lowerCamelCase : Dict = 0 _lowerCamelCase : Any = i - 1 while low <= high: _lowerCamelCase : Optional[int] = (low + high) // 2 if val < collection[mid]: _lowerCamelCase : List[str] = mid - 1 else: _lowerCamelCase : Dict = mid + 1 for j in range(A_, A_, -1 ): _lowerCamelCase : Optional[int] = collection[j - 1] _lowerCamelCase : Tuple = val return collection if __name__ == "__main__": lowerCAmelCase__ = input('''Enter numbers separated by a comma:\n''').strip() lowerCAmelCase__ = [int(item) for item in user_input.split(''',''')] print(binary_insertion_sort(unsorted))
175
1
'''simple docstring''' from math import factorial lowercase : dict[str, int] = {str(digit): factorial(digit) for digit in range(10)} def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' if not isinstance(snake_case__ , snake_case__ ): raise TypeError('''Parameter number must be int''' ) if number < 0: raise ValueError('''Parameter number must be greater than or equal to 0''' ) # Converts number in string to iterate on its digits and adds its factorial. return sum(DIGIT_FACTORIAL[digit] for digit in str(snake_case__ ) ) def lowerCAmelCase_ ( snake_case__ = 60 , snake_case__ = 100_0000 ): '''simple docstring''' if not isinstance(snake_case__ , snake_case__ ) or not isinstance(snake_case__ , snake_case__ ): raise TypeError('''Parameters chain_length and number_limit must be int''' ) if chain_length <= 0 or number_limit <= 0: raise ValueError( '''Parameters chain_length and number_limit must be greater than 0''' ) # the counter for the chains with the exact desired length A : int = 0 # the cached sizes of the previous chains A : dict[int, int] = {} for start_chain_element in range(1 , snake_case__ ): # The temporary set will contain the elements of the chain A : int = set() A : List[Any] = 0 # Stop computing the chain when you find a cached size, a repeating item or the # length is greater then the desired one. A : Union[str, Any] = start_chain_element while ( chain_element not in chain_sets_lengths and chain_element not in chain_set and chain_set_length <= chain_length ): chain_set.add(snake_case__ ) chain_set_length += 1 A : Tuple = digit_factorial_sum(snake_case__ ) if chain_element in chain_sets_lengths: chain_set_length += chain_sets_lengths[chain_element] A : Dict = chain_set_length # If chain contains the exact amount of elements increase the counter if chain_set_length == chain_length: chains_counter += 1 return chains_counter if __name__ == "__main__": import doctest doctest.testmod() print(f'''{solution()}''')
3
"""simple docstring""" import unittest import torch from torch import nn from diffusers.models.activations import get_activation class __lowerCAmelCase ( unittest.TestCase ): '''simple docstring''' def __UpperCAmelCase ( self ): __a = get_activation('''swish''' ) self.assertIsInstance(_a , nn.SiLU ) self.assertEqual(act(torch.tensor(-100 , dtype=torch.floataa ) ).item() , 0 ) self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 ) def __UpperCAmelCase ( self ): __a = get_activation('''silu''' ) self.assertIsInstance(_a , nn.SiLU ) self.assertEqual(act(torch.tensor(-100 , dtype=torch.floataa ) ).item() , 0 ) self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 ) def __UpperCAmelCase ( self ): __a = get_activation('''mish''' ) self.assertIsInstance(_a , nn.Mish ) self.assertEqual(act(torch.tensor(-200 , dtype=torch.floataa ) ).item() , 0 ) self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 ) def __UpperCAmelCase ( self ): __a = get_activation('''gelu''' ) self.assertIsInstance(_a , nn.GELU ) self.assertEqual(act(torch.tensor(-100 , dtype=torch.floataa ) ).item() , 0 ) self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
45
0
"""simple docstring""" import argparse import os import torch from diffusers import ( CMStochasticIterativeScheduler, ConsistencyModelPipeline, UNetaDModel, ) __SCREAMING_SNAKE_CASE : List[str] = { '''sample_size''': 3_2, '''in_channels''': 3, '''out_channels''': 3, '''layers_per_block''': 2, '''num_class_embeds''': 1_0_0_0, '''block_out_channels''': [3_2, 6_4], '''attention_head_dim''': 8, '''down_block_types''': [ '''ResnetDownsampleBlock2D''', '''AttnDownBlock2D''', ], '''up_block_types''': [ '''AttnUpBlock2D''', '''ResnetUpsampleBlock2D''', ], '''resnet_time_scale_shift''': '''scale_shift''', '''upsample_type''': '''resnet''', '''downsample_type''': '''resnet''', } __SCREAMING_SNAKE_CASE : Tuple = { '''sample_size''': 6_4, '''in_channels''': 3, '''out_channels''': 3, '''layers_per_block''': 3, '''num_class_embeds''': 1_0_0_0, '''block_out_channels''': [1_9_2, 1_9_2 * 2, 1_9_2 * 3, 1_9_2 * 4], '''attention_head_dim''': 6_4, '''down_block_types''': [ '''ResnetDownsampleBlock2D''', '''AttnDownBlock2D''', '''AttnDownBlock2D''', '''AttnDownBlock2D''', ], '''up_block_types''': [ '''AttnUpBlock2D''', '''AttnUpBlock2D''', '''AttnUpBlock2D''', '''ResnetUpsampleBlock2D''', ], '''resnet_time_scale_shift''': '''scale_shift''', '''upsample_type''': '''resnet''', '''downsample_type''': '''resnet''', } __SCREAMING_SNAKE_CASE : List[Any] = { '''sample_size''': 2_5_6, '''in_channels''': 3, '''out_channels''': 3, '''layers_per_block''': 2, '''num_class_embeds''': None, '''block_out_channels''': [2_5_6, 2_5_6, 2_5_6 * 2, 2_5_6 * 2, 2_5_6 * 4, 2_5_6 * 4], '''attention_head_dim''': 6_4, '''down_block_types''': [ '''ResnetDownsampleBlock2D''', '''ResnetDownsampleBlock2D''', '''ResnetDownsampleBlock2D''', '''AttnDownBlock2D''', '''AttnDownBlock2D''', '''AttnDownBlock2D''', ], '''up_block_types''': [ '''AttnUpBlock2D''', '''AttnUpBlock2D''', '''AttnUpBlock2D''', '''ResnetUpsampleBlock2D''', '''ResnetUpsampleBlock2D''', '''ResnetUpsampleBlock2D''', ], '''resnet_time_scale_shift''': '''default''', '''upsample_type''': '''resnet''', '''downsample_type''': '''resnet''', } __SCREAMING_SNAKE_CASE : List[Any] = { '''num_train_timesteps''': 4_0, '''sigma_min''': 0.002, '''sigma_max''': 80.0, } __SCREAMING_SNAKE_CASE : int = { '''num_train_timesteps''': 2_0_1, '''sigma_min''': 0.002, '''sigma_max''': 80.0, } __SCREAMING_SNAKE_CASE : Tuple = { '''num_train_timesteps''': 1_5_1, '''sigma_min''': 0.002, '''sigma_max''': 80.0, } def lowerCAmelCase_( lowercase_ : str ) -> Any: if isinstance(lowercase_ , lowercase_ ): return v if v.lower() in ("yes", "true", "t", "y", "1"): return True elif v.lower() in ("no", "false", "f", "n", "0"): return False else: raise argparse.ArgumentTypeError('''boolean value expected''' ) def lowerCAmelCase_( lowercase_ : int , lowercase_ : List[Any] , lowercase_ : Dict , lowercase_ : str , lowercase_ : Dict=False ) -> int: _lowerCamelCase = checkpoint[F"""{old_prefix}.in_layers.0.weight"""] _lowerCamelCase = checkpoint[F"""{old_prefix}.in_layers.0.bias"""] _lowerCamelCase = checkpoint[F"""{old_prefix}.in_layers.2.weight"""] _lowerCamelCase = checkpoint[F"""{old_prefix}.in_layers.2.bias"""] _lowerCamelCase = checkpoint[F"""{old_prefix}.emb_layers.1.weight"""] _lowerCamelCase = checkpoint[F"""{old_prefix}.emb_layers.1.bias"""] _lowerCamelCase = checkpoint[F"""{old_prefix}.out_layers.0.weight"""] _lowerCamelCase = checkpoint[F"""{old_prefix}.out_layers.0.bias"""] _lowerCamelCase = checkpoint[F"""{old_prefix}.out_layers.3.weight"""] _lowerCamelCase = checkpoint[F"""{old_prefix}.out_layers.3.bias"""] if has_skip: _lowerCamelCase = checkpoint[F"""{old_prefix}.skip_connection.weight"""] _lowerCamelCase = checkpoint[F"""{old_prefix}.skip_connection.bias"""] return new_checkpoint def lowerCAmelCase_( lowercase_ : Union[str, Any] , lowercase_ : Optional[int] , lowercase_ : Union[str, Any] , lowercase_ : Any , lowercase_ : Dict=None ) -> int: _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = checkpoint[F"""{old_prefix}.qkv.weight"""].chunk(3 , dim=0 ) _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = checkpoint[F"""{old_prefix}.qkv.bias"""].chunk(3 , dim=0 ) _lowerCamelCase = checkpoint[F"""{old_prefix}.norm.weight"""] _lowerCamelCase = checkpoint[F"""{old_prefix}.norm.bias"""] _lowerCamelCase = weight_q.squeeze(-1 ).squeeze(-1 ) _lowerCamelCase = bias_q.squeeze(-1 ).squeeze(-1 ) _lowerCamelCase = weight_k.squeeze(-1 ).squeeze(-1 ) _lowerCamelCase = bias_k.squeeze(-1 ).squeeze(-1 ) _lowerCamelCase = weight_v.squeeze(-1 ).squeeze(-1 ) _lowerCamelCase = bias_v.squeeze(-1 ).squeeze(-1 ) _lowerCamelCase = ( checkpoint[F"""{old_prefix}.proj_out.weight"""].squeeze(-1 ).squeeze(-1 ) ) _lowerCamelCase = checkpoint[F"""{old_prefix}.proj_out.bias"""].squeeze(-1 ).squeeze(-1 ) return new_checkpoint def lowerCAmelCase_( lowercase_ : str , lowercase_ : str ) -> int: _lowerCamelCase = torch.load(lowercase_ , map_location='''cpu''' ) _lowerCamelCase = {} _lowerCamelCase = checkpoint['''time_embed.0.weight'''] _lowerCamelCase = checkpoint['''time_embed.0.bias'''] _lowerCamelCase = checkpoint['''time_embed.2.weight'''] _lowerCamelCase = checkpoint['''time_embed.2.bias'''] if unet_config["num_class_embeds"] is not None: _lowerCamelCase = checkpoint['''label_emb.weight'''] _lowerCamelCase = checkpoint['''input_blocks.0.0.weight'''] _lowerCamelCase = checkpoint['''input_blocks.0.0.bias'''] _lowerCamelCase = unet_config['''down_block_types'''] _lowerCamelCase = unet_config['''layers_per_block'''] _lowerCamelCase = unet_config['''attention_head_dim'''] _lowerCamelCase = unet_config['''block_out_channels'''] _lowerCamelCase = 1 _lowerCamelCase = channels_list[0] for i, layer_type in enumerate(lowercase_ ): _lowerCamelCase = channels_list[i] _lowerCamelCase = current_channels != prev_channels if layer_type == "ResnetDownsampleBlock2D": for j in range(lowercase_ ): _lowerCamelCase = F"""down_blocks.{i}.resnets.{j}""" _lowerCamelCase = F"""input_blocks.{current_layer}.0""" _lowerCamelCase = True if j == 0 and downsample_block_has_skip else False _lowerCamelCase = convert_resnet(lowercase_ , lowercase_ , lowercase_ , lowercase_ , has_skip=lowercase_ ) current_layer += 1 elif layer_type == "AttnDownBlock2D": for j in range(lowercase_ ): _lowerCamelCase = F"""down_blocks.{i}.resnets.{j}""" _lowerCamelCase = F"""input_blocks.{current_layer}.0""" _lowerCamelCase = True if j == 0 and downsample_block_has_skip else False _lowerCamelCase = convert_resnet(lowercase_ , lowercase_ , lowercase_ , lowercase_ , has_skip=lowercase_ ) _lowerCamelCase = F"""down_blocks.{i}.attentions.{j}""" _lowerCamelCase = F"""input_blocks.{current_layer}.1""" _lowerCamelCase = convert_attention( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) current_layer += 1 if i != len(lowercase_ ) - 1: _lowerCamelCase = F"""down_blocks.{i}.downsamplers.0""" _lowerCamelCase = F"""input_blocks.{current_layer}.0""" _lowerCamelCase = convert_resnet(lowercase_ , lowercase_ , lowercase_ , lowercase_ ) current_layer += 1 _lowerCamelCase = current_channels # hardcoded the mid-block for now _lowerCamelCase = '''mid_block.resnets.0''' _lowerCamelCase = '''middle_block.0''' _lowerCamelCase = convert_resnet(lowercase_ , lowercase_ , lowercase_ , lowercase_ ) _lowerCamelCase = '''mid_block.attentions.0''' _lowerCamelCase = '''middle_block.1''' _lowerCamelCase = convert_attention(lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) _lowerCamelCase = '''mid_block.resnets.1''' _lowerCamelCase = '''middle_block.2''' _lowerCamelCase = convert_resnet(lowercase_ , lowercase_ , lowercase_ , lowercase_ ) _lowerCamelCase = 0 _lowerCamelCase = unet_config['''up_block_types'''] for i, layer_type in enumerate(lowercase_ ): if layer_type == "ResnetUpsampleBlock2D": for j in range(layers_per_block + 1 ): _lowerCamelCase = F"""up_blocks.{i}.resnets.{j}""" _lowerCamelCase = F"""output_blocks.{current_layer}.0""" _lowerCamelCase = convert_resnet(lowercase_ , lowercase_ , lowercase_ , lowercase_ , has_skip=lowercase_ ) current_layer += 1 if i != len(lowercase_ ) - 1: _lowerCamelCase = F"""up_blocks.{i}.upsamplers.0""" _lowerCamelCase = F"""output_blocks.{current_layer-1}.1""" _lowerCamelCase = convert_resnet(lowercase_ , lowercase_ , lowercase_ , lowercase_ ) elif layer_type == "AttnUpBlock2D": for j in range(layers_per_block + 1 ): _lowerCamelCase = F"""up_blocks.{i}.resnets.{j}""" _lowerCamelCase = F"""output_blocks.{current_layer}.0""" _lowerCamelCase = convert_resnet(lowercase_ , lowercase_ , lowercase_ , lowercase_ , has_skip=lowercase_ ) _lowerCamelCase = F"""up_blocks.{i}.attentions.{j}""" _lowerCamelCase = F"""output_blocks.{current_layer}.1""" _lowerCamelCase = convert_attention( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) current_layer += 1 if i != len(lowercase_ ) - 1: _lowerCamelCase = F"""up_blocks.{i}.upsamplers.0""" _lowerCamelCase = F"""output_blocks.{current_layer-1}.2""" _lowerCamelCase = convert_resnet(lowercase_ , lowercase_ , lowercase_ , lowercase_ ) _lowerCamelCase = checkpoint['''out.0.weight'''] _lowerCamelCase = checkpoint['''out.0.bias'''] _lowerCamelCase = checkpoint['''out.2.weight'''] _lowerCamelCase = checkpoint['''out.2.bias'''] return new_checkpoint if __name__ == "__main__": __SCREAMING_SNAKE_CASE : Optional[int] = argparse.ArgumentParser() parser.add_argument('''--unet_path''', default=None, type=str, required=True, help='''Path to the unet.pt to convert.''') parser.add_argument( '''--dump_path''', default=None, type=str, required=True, help='''Path to output the converted UNet model.''' ) parser.add_argument('''--class_cond''', default=True, type=str, help='''Whether the model is class-conditional.''') __SCREAMING_SNAKE_CASE : List[str] = parser.parse_args() __SCREAMING_SNAKE_CASE : Optional[int] = strabool(args.class_cond) __SCREAMING_SNAKE_CASE : Optional[Any] = os.path.basename(args.unet_path) print(F"""Checkpoint: {ckpt_name}""") # Get U-Net config if "imagenet64" in ckpt_name: __SCREAMING_SNAKE_CASE : Union[str, Any] = IMAGENET_64_UNET_CONFIG elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)): __SCREAMING_SNAKE_CASE : int = LSUN_256_UNET_CONFIG elif "test" in ckpt_name: __SCREAMING_SNAKE_CASE : int = TEST_UNET_CONFIG else: raise ValueError(F"""Checkpoint type {ckpt_name} is not currently supported.""") if not args.class_cond: __SCREAMING_SNAKE_CASE : List[Any] = None __SCREAMING_SNAKE_CASE : Optional[int] = con_pt_to_diffuser(args.unet_path, unet_config) __SCREAMING_SNAKE_CASE : Optional[int] = UNetaDModel(**unet_config) image_unet.load_state_dict(converted_unet_ckpt) # Get scheduler config if "cd" in ckpt_name or "test" in ckpt_name: __SCREAMING_SNAKE_CASE : int = CD_SCHEDULER_CONFIG elif "ct" in ckpt_name and "imagenet64" in ckpt_name: __SCREAMING_SNAKE_CASE : Dict = CT_IMAGENET_64_SCHEDULER_CONFIG elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)): __SCREAMING_SNAKE_CASE : Any = CT_LSUN_256_SCHEDULER_CONFIG else: raise ValueError(F"""Checkpoint type {ckpt_name} is not currently supported.""") __SCREAMING_SNAKE_CASE : Any = CMStochasticIterativeScheduler(**scheduler_config) __SCREAMING_SNAKE_CASE : Optional[int] = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler) consistency_model.save_pretrained(args.dump_path)
73
"""simple docstring""" import argparse from collections import defaultdict def lowerCAmelCase_( lowercase_ : str , lowercase_ : Dict , lowercase_ : Tuple , lowercase_ : str , lowercase_ : str ) -> Optional[int]: _lowerCamelCase = F"""{file}_{class_name}_{test_name}""" done_test[_id] += 1 with open(lowercase_ , '''r''' ) as f: _lowerCamelCase = f.readlines() _lowerCamelCase = F"""class {class_name}(""" _lowerCamelCase = F"""{4 * " "}def {test_name}(""" _lowerCamelCase = F"""{8 * " "}{correct_line.split()[0]}""" _lowerCamelCase = F"""{16 * " "}{correct_line.split()[0]}""" _lowerCamelCase = False _lowerCamelCase = False _lowerCamelCase = False _lowerCamelCase = False _lowerCamelCase = 0 _lowerCamelCase = 0 _lowerCamelCase = [] for line in lines: if line.startswith(lowercase_ ): _lowerCamelCase = True elif in_class and line.startswith(lowercase_ ): _lowerCamelCase = True elif in_class and in_func and (line.startswith(lowercase_ ) or line.startswith(lowercase_ )): _lowerCamelCase = len(line.split(correct_line.split()[0] )[0] ) count += 1 if count == done_test[_id]: _lowerCamelCase = True if in_class and in_func and in_line: if ")" not in line: continue else: _lowerCamelCase = True if in_class and in_func and in_line and insert_line: new_lines.append(F"""{spaces * " "}{correct_line}""" ) _lowerCamelCase = _lowerCamelCase = _lowerCamelCase = _lowerCamelCase = False else: new_lines.append(lowercase_ ) with open(lowercase_ , '''w''' ) as f: for line in new_lines: f.write(lowercase_ ) def lowerCAmelCase_( lowercase_ : str , lowercase_ : Union[str, Any]=None ) -> Any: if fail is not None: with open(lowercase_ , '''r''' ) as f: _lowerCamelCase = {l.strip() for l in f.readlines()} else: _lowerCamelCase = None with open(lowercase_ , '''r''' ) as f: _lowerCamelCase = f.readlines() _lowerCamelCase = defaultdict(lowercase_ ) for line in correct_lines: _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = line.split(''';''' ) if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures: overwrite_file(lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE : Optional[Any] = argparse.ArgumentParser() parser.add_argument('''--correct_filename''', help='''filename of tests with expected result''') parser.add_argument('''--fail_filename''', help='''filename of test failures''', type=str, default=None) __SCREAMING_SNAKE_CASE : Dict = parser.parse_args() main(args.correct_filename, args.fail_filename)
73
1
from __future__ import annotations def __lowerCAmelCase ( a__ , a__ ) -> list[int]: __a = 0 __a = len(a__ ) - 1 while i < j: if nums[i] + nums[j] == target: return [i, j] elif nums[i] + nums[j] < target: __a = i + 1 else: __a = j - 1 return [] if __name__ == "__main__": import doctest doctest.testmod() print(F"{two_pointer([2, 7, 1_1, 1_5], 9) = }")
6
"""simple docstring""" from pathlib import Path from typing import List from transformers import is_torch_available, is_vision_available from transformers.testing_utils import get_tests_dir, is_tool_test from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText if is_torch_available(): import torch if is_vision_available(): from PIL import Image _lowercase : Union[str, Any] = ["text", "image", "audio"] def snake_case__ ( __lowerCamelCase : List[str] ): """simple docstring""" lowerCamelCase__ : Union[str, Any] =[] for input_type in input_types: if input_type == "text": inputs.append('''Text input''' ) elif input_type == "image": inputs.append( Image.open(Path(get_tests_dir('''fixtures/tests_samples/COCO''' ) ) / '''000000039769.png''' ).resize((512, 512) ) ) elif input_type == "audio": inputs.append(torch.ones(3000 ) ) elif isinstance(__lowerCamelCase , __lowerCamelCase ): inputs.append(create_inputs(__lowerCamelCase ) ) else: raise ValueError(f'''Invalid type requested: {input_type}''' ) return inputs def snake_case__ ( __lowerCamelCase : List ): """simple docstring""" lowerCamelCase__ : Tuple =[] for output in outputs: if isinstance(__lowerCamelCase , (str, AgentText) ): output_types.append('''text''' ) elif isinstance(__lowerCamelCase , (Image.Image, AgentImage) ): output_types.append('''image''' ) elif isinstance(__lowerCamelCase , (torch.Tensor, AgentAudio) ): output_types.append('''audio''' ) else: raise ValueError(f'''Invalid output: {output}''' ) return output_types @is_tool_test class __SCREAMING_SNAKE_CASE : '''simple docstring''' def snake_case ( self : Any )-> Optional[Any]: self.assertTrue(hasattr(self.tool, '''inputs''' ) ) self.assertTrue(hasattr(self.tool, '''outputs''' ) ) lowerCamelCase__ : Tuple =self.tool.inputs for _input in inputs: if isinstance(_input, lowerCamelCase ): for __input in _input: self.assertTrue(__input in authorized_types ) else: self.assertTrue(_input in authorized_types ) lowerCamelCase__ : Optional[Any] =self.tool.outputs for _output in outputs: self.assertTrue(_output in authorized_types ) def snake_case ( self : Optional[int] )-> Union[str, Any]: lowerCamelCase__ : Optional[int] =create_inputs(self.tool.inputs ) lowerCamelCase__ : List[Any] =self.tool(*lowerCamelCase ) # There is a single output if len(self.tool.outputs ) == 1: lowerCamelCase__ : Optional[int] =[outputs] self.assertListEqual(output_types(lowerCamelCase ), self.tool.outputs ) def snake_case ( self : Union[str, Any] )-> List[str]: self.assertTrue(hasattr(self.tool, '''description''' ) ) self.assertTrue(hasattr(self.tool, '''default_checkpoint''' ) ) self.assertTrue(self.tool.description.startswith('''This is a tool that''' ) ) def snake_case ( self : Union[str, Any] )-> str: lowerCamelCase__ : List[str] =create_inputs(self.tool.inputs ) lowerCamelCase__ : Optional[Any] =self.tool(*lowerCamelCase ) if not isinstance(lowerCamelCase, lowerCamelCase ): lowerCamelCase__ : Any =[outputs] self.assertEqual(len(lowerCamelCase ), len(self.tool.outputs ) ) for output, output_type in zip(lowerCamelCase, self.tool.outputs ): lowerCamelCase__ : List[Any] =AGENT_TYPE_MAPPING[output_type] self.assertTrue(isinstance(lowerCamelCase, lowerCamelCase ) ) def snake_case ( self : Optional[Any] )-> List[Any]: lowerCamelCase__ : Optional[Any] =create_inputs(self.tool.inputs ) lowerCamelCase__ : List[str] =[] for _input, input_type in zip(lowerCamelCase, self.tool.inputs ): if isinstance(lowerCamelCase, lowerCamelCase ): _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] ) else: _inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) ) # Should not raise an error lowerCamelCase__ : Any =self.tool(*lowerCamelCase ) if not isinstance(lowerCamelCase, lowerCamelCase ): lowerCamelCase__ : Optional[int] =[outputs] self.assertEqual(len(lowerCamelCase ), len(self.tool.outputs ) )
238
0
"""simple docstring""" import os import unittest from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer from transformers.testing_utils import get_tests_dir from ...test_tokenization_common import TokenizerTesterMixin __snake_case = get_tests_dir('''fixtures/test_sentencepiece_bpe.model''') class __lowerCamelCase ( a__ , unittest.TestCase ): '''simple docstring''' A_ : Any = BartphoTokenizer A_ : Any = False A_ : int = True def _UpperCAmelCase ( self ) -> int: super().setUp() _a = ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] _a = dict(zip(__UpperCAmelCase , range(len(__UpperCAmelCase ) ) ) ) _a = {'''unk_token''': '''<unk>'''} _a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''monolingual_vocab_file'''] ) with open(self.monolingual_vocab_file , '''w''' , encoding='''utf-8''' ) as fp: for token in vocab_tokens: fp.write(F'{token} {vocab_tokens[token]}\n' ) _a = BartphoTokenizer(__UpperCAmelCase , self.monolingual_vocab_file , **self.special_tokens_map ) tokenizer.save_pretrained(self.tmpdirname ) def _UpperCAmelCase ( self , **__UpperCAmelCase ) -> List[Any]: kwargs.update(self.special_tokens_map ) return BartphoTokenizer.from_pretrained(self.tmpdirname , **__UpperCAmelCase ) def _UpperCAmelCase ( self , __UpperCAmelCase ) -> List[Any]: _a = '''This is a là test''' _a = '''This is a<unk><unk> test''' return input_text, output_text def _UpperCAmelCase ( self ) -> int: _a = BartphoTokenizer(__UpperCAmelCase , self.monolingual_vocab_file , **self.special_tokens_map ) _a = '''This is a là test''' _a = '''▁This ▁is ▁a ▁l à ▁t est'''.split() _a = tokenizer.tokenize(__UpperCAmelCase ) self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase ) _a = tokens + [tokenizer.unk_token] _a = [4, 5, 6, 3, 3, 7, 8, 3] self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) , __UpperCAmelCase )
153
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) __snake_case = { '''configuration_resnet''': ['''RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ResNetConfig''', '''ResNetOnnxConfig'''] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case = [ '''RESNET_PRETRAINED_MODEL_ARCHIVE_LIST''', '''ResNetForImageClassification''', '''ResNetModel''', '''ResNetPreTrainedModel''', '''ResNetBackbone''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case = [ '''TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFResNetForImageClassification''', '''TFResNetModel''', '''TFResNetPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case = [ '''FlaxResNetForImageClassification''', '''FlaxResNetModel''', '''FlaxResNetPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_resnet import ( RESNET_PRETRAINED_MODEL_ARCHIVE_LIST, ResNetBackbone, ResNetForImageClassification, ResNetModel, ResNetPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_resnet import ( TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFResNetForImageClassification, TFResNetModel, TFResNetPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel else: import sys __snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
153
1
A__: Optional[Any] = [ '''Audio''', '''Array2D''', '''Array3D''', '''Array4D''', '''Array5D''', '''ClassLabel''', '''Features''', '''Sequence''', '''Value''', '''Image''', '''Translation''', '''TranslationVariableLanguages''', ] from .audio import Audio from .features import ArrayaD, ArrayaD, ArrayaD, ArrayaD, ClassLabel, Features, Sequence, Value from .image import Image from .translation import Translation, TranslationVariableLanguages
149
import argparse import os import numpy as np import tensorflow as tf import torch from transformers import BertModel def lowerCAmelCase_ ( A_ ,A_ ,A_): UpperCamelCase__: Dict = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value") UpperCamelCase__: Dict = ( ("layer.", "layer_"), ("word_embeddings.weight", "word_embeddings"), ("position_embeddings.weight", "position_embeddings"), ("token_type_embeddings.weight", "token_type_embeddings"), (".", "/"), ("LayerNorm/weight", "LayerNorm/gamma"), ("LayerNorm/bias", "LayerNorm/beta"), ("weight", "kernel"), ) if not os.path.isdir(A_): os.makedirs(A_) UpperCamelCase__: Optional[Any] = model.state_dict() def to_tf_var_name(A_): for patt, repl in iter(A_): UpperCamelCase__: Optional[Any] = name.replace(A_ ,A_) return F"bert/{name}" def create_tf_var(A_ ,A_ ,A_): UpperCamelCase__: Any = tf.dtypes.as_dtype(tensor.dtype) UpperCamelCase__: int = tf.get_variable(dtype=A_ ,shape=tensor.shape ,name=A_ ,initializer=tf.zeros_initializer()) session.run(tf.variables_initializer([tf_var])) session.run(A_) return tf_var tf.reset_default_graph() with tf.Session() as session: for var_name in state_dict: UpperCamelCase__: List[Any] = to_tf_var_name(A_) UpperCamelCase__: List[str] = state_dict[var_name].numpy() if any(x in var_name for x in tensors_to_transpose): UpperCamelCase__: List[Any] = torch_tensor.T UpperCamelCase__: int = create_tf_var(tensor=A_ ,name=A_ ,session=A_) tf.keras.backend.set_value(A_ ,A_) UpperCamelCase__: Optional[Any] = session.run(A_) print(F"Successfully created {tf_name}: {np.allclose(A_ ,A_)}") UpperCamelCase__: Tuple = tf.train.Saver(tf.trainable_variables()) saver.save(A_ ,os.path.join(A_ ,model_name.replace("-" ,"_") + ".ckpt")) def lowerCAmelCase_ ( A_=None): UpperCamelCase__: Tuple = argparse.ArgumentParser() parser.add_argument("--model_name" ,type=A_ ,required=A_ ,help="model name e.g. bert-base-uncased") parser.add_argument( "--cache_dir" ,type=A_ ,default=A_ ,required=A_ ,help="Directory containing pytorch model") parser.add_argument("--pytorch_model_path" ,type=A_ ,required=A_ ,help="/path/to/<pytorch-model-name>.bin") parser.add_argument("--tf_cache_dir" ,type=A_ ,required=A_ ,help="Directory in which to save tensorflow model") UpperCamelCase__: Any = parser.parse_args(A_) UpperCamelCase__: List[Any] = BertModel.from_pretrained( pretrained_model_name_or_path=args.model_name ,state_dict=torch.load(args.pytorch_model_path) ,cache_dir=args.cache_dir ,) convert_pytorch_checkpoint_to_tf(model=A_ ,ckpt_dir=args.tf_cache_dir ,model_name=args.model_name) if __name__ == "__main__": main()
149
1
"""simple docstring""" from __future__ import annotations import pandas as pd def lowercase (SCREAMING_SNAKE_CASE_ : list[int] , SCREAMING_SNAKE_CASE_ : list[int] , SCREAMING_SNAKE_CASE_ : int ) -> list[int]: SCREAMING_SNAKE_CASE = [0] * no_of_processes SCREAMING_SNAKE_CASE = [0] * no_of_processes # Copy the burst time into remaining_time[] for i in range(SCREAMING_SNAKE_CASE_ ): SCREAMING_SNAKE_CASE = burst_time[i] SCREAMING_SNAKE_CASE = 0 SCREAMING_SNAKE_CASE = 0 SCREAMING_SNAKE_CASE = 9_99_99_99_99 SCREAMING_SNAKE_CASE = 0 SCREAMING_SNAKE_CASE = False # Process until all processes are completed while complete != no_of_processes: for j in range(SCREAMING_SNAKE_CASE_ ): if arrival_time[j] <= increment_time and remaining_time[j] > 0: if remaining_time[j] < minm: SCREAMING_SNAKE_CASE = remaining_time[j] SCREAMING_SNAKE_CASE = j SCREAMING_SNAKE_CASE = True if not check: increment_time += 1 continue remaining_time[short] -= 1 SCREAMING_SNAKE_CASE = remaining_time[short] if minm == 0: SCREAMING_SNAKE_CASE = 9_99_99_99_99 if remaining_time[short] == 0: complete += 1 SCREAMING_SNAKE_CASE = False # Find finish time of current process SCREAMING_SNAKE_CASE = increment_time + 1 # Calculate waiting time SCREAMING_SNAKE_CASE = finish_time - arrival_time[short] SCREAMING_SNAKE_CASE = finar - burst_time[short] if waiting_time[short] < 0: SCREAMING_SNAKE_CASE = 0 # Increment time increment_time += 1 return waiting_time def lowercase (SCREAMING_SNAKE_CASE_ : list[int] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : list[int] ) -> list[int]: SCREAMING_SNAKE_CASE = [0] * no_of_processes for i in range(SCREAMING_SNAKE_CASE_ ): SCREAMING_SNAKE_CASE = burst_time[i] + waiting_time[i] return turn_around_time def lowercase (SCREAMING_SNAKE_CASE_ : list[int] , SCREAMING_SNAKE_CASE_ : list[int] , SCREAMING_SNAKE_CASE_ : int ) -> None: SCREAMING_SNAKE_CASE = 0 SCREAMING_SNAKE_CASE = 0 for i in range(SCREAMING_SNAKE_CASE_ ): SCREAMING_SNAKE_CASE = total_waiting_time + waiting_time[i] SCREAMING_SNAKE_CASE = total_turn_around_time + turn_around_time[i] print(F'Average waiting time = {total_waiting_time / no_of_processes:.5f}' ) print('Average turn around time =' , total_turn_around_time / no_of_processes ) if __name__ == "__main__": print('''Enter how many process you want to analyze''') __UpperCamelCase = int(input()) __UpperCamelCase = [0] * no_of_processes __UpperCamelCase = [0] * no_of_processes __UpperCamelCase = list(range(1, no_of_processes + 1)) for i in range(no_of_processes): print('''Enter the arrival time and burst time for process:--''' + str(i + 1)) __UpperCamelCase,__UpperCamelCase = map(int, input().split()) __UpperCamelCase = calculate_waitingtime(arrival_time, burst_time, no_of_processes) __UpperCamelCase = burst_time __UpperCamelCase = no_of_processes __UpperCamelCase = waiting_time __UpperCamelCase = calculate_turnaroundtime(bt, n, wt) calculate_average_times(waiting_time, turn_around_time, no_of_processes) __UpperCamelCase = pd.DataFrame( list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)), columns=[ '''Process''', '''BurstTime''', '''ArrivalTime''', '''WaitingTime''', '''TurnAroundTime''', ], ) # Printing the dataFrame pd.set_option('''display.max_rows''', fcfs.shape[0] + 1) print(fcfs)
38
"""simple docstring""" from collections.abc import Callable from math import pi, sqrt from random import uniform from statistics import mean def lowercase (SCREAMING_SNAKE_CASE_ : int ) -> List[str]: # A local function to see if a dot lands in the circle. def is_in_circle(SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : float ) -> bool: SCREAMING_SNAKE_CASE = sqrt((x**2) + (y**2) ) # Our circle has a radius of 1, so a distance # greater than 1 would land outside the circle. return distance_from_centre <= 1 # The proportion of guesses that landed in the circle SCREAMING_SNAKE_CASE = mean( int(is_in_circle(uniform(-1.0 , 1.0 ) , uniform(-1.0 , 1.0 ) ) ) for _ in range(SCREAMING_SNAKE_CASE_ ) ) # The ratio of the area for circle to square is pi/4. SCREAMING_SNAKE_CASE = proportion * 4 print(F'The estimated value of pi is {pi_estimate}' ) print(F'The numpy value of pi is {pi}' ) print(F'The total error is {abs(pi - pi_estimate )}' ) def lowercase (SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Callable[[float], float] , SCREAMING_SNAKE_CASE_ : float = 0.0 , SCREAMING_SNAKE_CASE_ : float = 1.0 , ) -> float: return mean( function_to_integrate(uniform(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) for _ in range(SCREAMING_SNAKE_CASE_ ) ) * (max_value - min_value) def lowercase (SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : float = 0.0 , SCREAMING_SNAKE_CASE_ : float = 1.0 ) -> None: def identity_function(SCREAMING_SNAKE_CASE_ : float ) -> float: return x SCREAMING_SNAKE_CASE = area_under_curve_estimator( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) SCREAMING_SNAKE_CASE = (max_value * max_value - min_value * min_value) / 2 print('******************' ) print(F'Estimating area under y=x where x varies from {min_value} to {max_value}' ) print(F'Estimated value is {estimated_value}' ) print(F'Expected value is {expected_value}' ) print(F'Total error is {abs(estimated_value - expected_value )}' ) print('******************' ) def lowercase (SCREAMING_SNAKE_CASE_ : int ) -> None: def function_to_integrate(SCREAMING_SNAKE_CASE_ : float ) -> float: return sqrt(4.0 - x * x ) SCREAMING_SNAKE_CASE = area_under_curve_estimator( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , 0.0 , 2.0 ) print('******************' ) print('Estimating pi using area_under_curve_estimator' ) print(F'Estimated value is {estimated_value}' ) print(F'Expected value is {pi}' ) print(F'Total error is {abs(estimated_value - pi )}' ) print('******************' ) if __name__ == "__main__": import doctest doctest.testmod()
38
1
import argparse import pytorch_lightning as pl import torch from torch import nn from transformers import LongformerForQuestionAnswering, LongformerModel class _lowercase ( pl.LightningModule ): '''simple docstring''' def __init__( self :Tuple , lowerCAmelCase__ :Union[str, Any] ) -> Union[str, Any]: super().__init__() __SCREAMING_SNAKE_CASE : Optional[Any] = model __SCREAMING_SNAKE_CASE : Tuple = 2 __SCREAMING_SNAKE_CASE : Tuple = nn.Linear(self.model.config.hidden_size , self.num_labels ) def __magic_name__( self :int ) -> Optional[Any]: pass def _UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ ): __SCREAMING_SNAKE_CASE : Optional[Any] = LongformerModel.from_pretrained(lowercase__ ) __SCREAMING_SNAKE_CASE : List[str] = LightningModel(lowercase__ ) __SCREAMING_SNAKE_CASE : List[Any] = torch.load(lowercase__ , map_location=torch.device('''cpu''' ) ) lightning_model.load_state_dict(ckpt['''state_dict'''] ) # init longformer question answering model __SCREAMING_SNAKE_CASE : List[Any] = LongformerForQuestionAnswering.from_pretrained(lowercase__ ) # transfer weights longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict() ) longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict() ) longformer_for_qa.eval() # save model longformer_for_qa.save_pretrained(lowercase__ ) print(F'''Conversion successful. Model saved under {pytorch_dump_folder_path}''' ) if __name__ == "__main__": __lowerCAmelCase : Dict =argparse.ArgumentParser() # Required parameters parser.add_argument( '--longformer_model', default=None, type=str, required=True, help='model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.', ) parser.add_argument( '--longformer_question_answering_ckpt_path', default=None, type=str, required=True, help='Path the official PyTorch Lightning Checkpoint.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) __lowerCAmelCase : str =parser.parse_args() convert_longformer_qa_checkpoint_to_pytorch( args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path )
9
"""simple docstring""" import unittest from transformers import BigBirdTokenizer, BigBirdTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin lowerCAmelCase__ = '''▁''' lowerCAmelCase__ = get_tests_dir('''fixtures/test_sentencepiece.model''') @require_sentencepiece @require_tokenizers class SCREAMING_SNAKE_CASE__ ( lowercase , unittest.TestCase ): """simple docstring""" a : str =BigBirdTokenizer a : Union[str, Any] =BigBirdTokenizerFast a : Tuple =True a : Any =True def lowercase__ ( self ): """simple docstring""" super().setUp() lowerCAmelCase : str = self.tokenizer_class(snake_case__ , keep_accents=snake_case__ ) tokenizer.save_pretrained(self.tmpdirname ) def lowercase__ ( self ): """simple docstring""" lowerCAmelCase : str = "<s>" lowerCAmelCase : Optional[int] = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case__ ) , snake_case__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case__ ) , snake_case__ ) def lowercase__ ( self ): """simple docstring""" lowerCAmelCase : Dict = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<unk>" ) self.assertEqual(vocab_keys[1] , "<s>" ) self.assertEqual(vocab_keys[-1] , "[MASK]" ) self.assertEqual(len(snake_case__ ) , 1_004 ) def lowercase__ ( self ): """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 1_000 ) def lowercase__ ( self ): """simple docstring""" if not self.test_rust_tokenizer: return lowerCAmelCase : Tuple = self.get_tokenizer() lowerCAmelCase : Optional[int] = self.get_rust_tokenizer() lowerCAmelCase : Tuple = "I was born in 92000, and this is falsé." lowerCAmelCase : Optional[int] = tokenizer.tokenize(snake_case__ ) lowerCAmelCase : int = rust_tokenizer.tokenize(snake_case__ ) self.assertListEqual(snake_case__ , snake_case__ ) lowerCAmelCase : Tuple = tokenizer.encode(snake_case__ , add_special_tokens=snake_case__ ) lowerCAmelCase : int = rust_tokenizer.encode(snake_case__ , add_special_tokens=snake_case__ ) self.assertListEqual(snake_case__ , snake_case__ ) lowerCAmelCase : List[Any] = self.get_rust_tokenizer() lowerCAmelCase : Tuple = tokenizer.encode(snake_case__ ) lowerCAmelCase : List[Any] = rust_tokenizer.encode(snake_case__ ) self.assertListEqual(snake_case__ , snake_case__ ) def lowercase__ ( self ): """simple docstring""" lowerCAmelCase : Any = BigBirdTokenizer(snake_case__ , keep_accents=snake_case__ ) lowerCAmelCase : Union[str, Any] = tokenizer.tokenize("This is a test" ) self.assertListEqual(snake_case__ , ["▁This", "▁is", "▁a", "▁t", "est"] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(snake_case__ ) , [285, 46, 10, 170, 382] , ) lowerCAmelCase : Any = tokenizer.tokenize("I was born in 92000, and this is falsé." ) self.assertListEqual( snake_case__ , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", ".", ] , ) lowerCAmelCase : str = tokenizer.convert_tokens_to_ids(snake_case__ ) self.assertListEqual( snake_case__ , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , ) lowerCAmelCase : List[str] = tokenizer.convert_ids_to_tokens(snake_case__ ) self.assertListEqual( snake_case__ , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", ".", ] , ) @cached_property def lowercase__ ( self ): """simple docstring""" return BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base" ) @slow def lowercase__ ( self ): """simple docstring""" lowerCAmelCase : List[Any] = "Hello World!" lowerCAmelCase : Any = [65, 18_536, 2_260, 101, 66] self.assertListEqual(snake_case__ , self.big_tokenizer.encode(snake_case__ ) ) @slow def lowercase__ ( self ): """simple docstring""" lowerCAmelCase : Union[str, Any] = ( "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will" " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth" ) # fmt: off lowerCAmelCase : List[str] = [65, 871, 419, 358, 946, 991, 2_521, 452, 358, 1_357, 387, 7_751, 3_536, 112, 985, 456, 126, 865, 938, 5_400, 5_734, 458, 1_368, 467, 786, 2_462, 5_246, 1_159, 633, 865, 4_519, 457, 582, 852, 2_557, 427, 916, 508, 405, 34_324, 497, 391, 408, 11_342, 1_244, 385, 100, 938, 985, 456, 574, 362, 12_597, 3_200, 3_129, 1_172, 66] # noqa: E231 # fmt: on self.assertListEqual(snake_case__ , self.big_tokenizer.encode(snake_case__ ) ) @require_torch @slow def lowercase__ ( self ): """simple docstring""" import torch from transformers import BigBirdConfig, BigBirdModel # Build sequence lowerCAmelCase : Dict = list(self.big_tokenizer.get_vocab().keys() )[:10] lowerCAmelCase : int = " ".join(snake_case__ ) lowerCAmelCase : Dict = self.big_tokenizer.encode_plus(snake_case__ , return_tensors="pt" , return_token_type_ids=snake_case__ ) lowerCAmelCase : Any = self.big_tokenizer.batch_encode_plus( [sequence + " " + sequence] , return_tensors="pt" , return_token_type_ids=snake_case__ ) lowerCAmelCase : str = BigBirdConfig(attention_type="original_full" ) lowerCAmelCase : Any = BigBirdModel(snake_case__ ) assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size with torch.no_grad(): model(**snake_case__ ) model(**snake_case__ ) @slow def lowercase__ ( self ): """simple docstring""" lowerCAmelCase : List[str] = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base" ) lowerCAmelCase : Union[str, Any] = tokenizer.decode(tokenizer("Paris is the [MASK]." ).input_ids ) self.assertTrue(decoded_text == "[CLS] Paris is the[MASK].[SEP]" ) @slow def lowercase__ ( self ): """simple docstring""" lowerCAmelCase : Any = {"input_ids": [[65, 39_286, 458, 36_335, 2_001, 456, 13_073, 13_266, 455, 113, 7_746, 1_741, 11_157, 391, 13_073, 13_266, 455, 113, 3_967, 35_412, 113, 4_936, 109, 3_870, 2_377, 113, 30_084, 45_720, 458, 134, 17_496, 112, 503, 11_672, 113, 118, 112, 5_665, 13_347, 38_687, 112, 1_496, 31_389, 112, 3_268, 47_264, 134, 962, 112, 16_377, 8_035, 23_130, 430, 12_169, 15_518, 28_592, 458, 146, 41_697, 109, 391, 12_169, 15_518, 16_689, 458, 146, 41_358, 109, 452, 726, 4_034, 111, 763, 35_412, 5_082, 388, 1_903, 111, 9_051, 391, 2_870, 48_918, 1_900, 1_123, 550, 998, 112, 9_586, 15_985, 455, 391, 410, 22_955, 37_636, 114, 66], [65, 448, 17_496, 419, 3_663, 385, 763, 113, 27_533, 2_870, 3_283, 13_043, 1_639, 24_713, 523, 656, 24_013, 18_550, 2_521, 517, 27_014, 21_244, 420, 1_212, 1_465, 391, 927, 4_833, 388, 578, 11_786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2_169, 7_687, 21_932, 18_146, 726, 363, 17_032, 3_391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=snake_case__ , model_name="google/bigbird-roberta-base" , revision="215c99f1600e06f83acce68422f2035b2b5c3510" , )
108
0
'''simple docstring''' import math import sys import cva import numpy as np def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase ): """simple docstring""" _lowerCAmelCase = math.sqrt(lowerCAmelCase ) _lowerCAmelCase = 1 / (sigma * math.sqrt(2 * math.pi )) return cons * np.exp(-((img / sigma) ** 2) * 0.5 ) def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): """simple docstring""" _lowerCAmelCase = kernel_size // 2 return img[x - half : x + half + 1, y - half : y + half + 1] def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase ): """simple docstring""" _lowerCAmelCase = np.zeros((kernel_size, kernel_size) ) for i in range(0 , lowerCAmelCase ): for j in range(0 , lowerCAmelCase ): _lowerCAmelCase = math.sqrt( abs(i - kernel_size // 2 ) ** 2 + abs(j - kernel_size // 2 ) ** 2 ) return vec_gaussian(lowerCAmelCase , lowerCAmelCase ) def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ): """simple docstring""" _lowerCAmelCase = np.zeros(img.shape ) _lowerCAmelCase = get_gauss_kernel(lowerCAmelCase , lowerCAmelCase ) _lowerCAmelCase , _lowerCAmelCase = img.shape for i in range(kernel_size // 2 , size_x - kernel_size // 2 ): for j in range(kernel_size // 2 , size_y - kernel_size // 2 ): _lowerCAmelCase = get_slice(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) _lowerCAmelCase = img_s - img_s[kernel_size // 2, kernel_size // 2] _lowerCAmelCase = vec_gaussian(lowerCAmelCase , lowerCAmelCase ) _lowerCAmelCase = np.multiply(lowerCAmelCase , lowerCAmelCase ) _lowerCAmelCase = np.multiply(lowerCAmelCase , lowerCAmelCase ) _lowerCAmelCase = np.sum(lowerCAmelCase ) / np.sum(lowerCAmelCase ) _lowerCAmelCase = val return imga def UpperCamelCase__ ( lowerCAmelCase ): """simple docstring""" _lowerCAmelCase = args[1] if args[1:] else """../image_data/lena.jpg""" _lowerCAmelCase = float(args[2] ) if args[2:] else 1.0 _lowerCAmelCase = float(args[3] ) if args[3:] else 1.0 if args[4:]: _lowerCAmelCase = int(args[4] ) _lowerCAmelCase = kernel_size + abs(kernel_size % 2 - 1 ) else: _lowerCAmelCase = 5 return filename, spatial_variance, intensity_variance, kernel_size if __name__ == "__main__": A__ , A__ , A__ , A__ : Tuple =parse_args(sys.argv) A__ : Any =cva.imread(filename, 0) cva.imshow('''input image''', img) A__ : Optional[Any] =img / 2_55 A__ : List[str] =out.astype('''float32''') A__ : Optional[int] =bilateral_filter(out, spatial_variance, intensity_variance, kernel_size) A__ : str =out * 2_55 A__ : str =np.uinta(out) cva.imshow('''output image''', out) cva.waitKey(0) cva.destroyAllWindows()
220
'''simple docstring''' # Usage: # ./gen-card-facebook-wmt19.py import os from pathlib import Path def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): """simple docstring""" _lowerCAmelCase = { """en""": """Machine learning is great, isn't it?""", """ru""": """Машинное обучение - это здорово, не так ли?""", """de""": """Maschinelles Lernen ist großartig, oder?""", } # BLUE scores as follows: # "pair": [fairseq, transformers] _lowerCAmelCase = { """ru-en""": ["""[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)""", """39.20"""], """en-ru""": ["""[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)""", """33.47"""], """en-de""": ["""[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)""", """42.83"""], """de-en""": ["""[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)""", """41.35"""], } _lowerCAmelCase = f"{src_lang}-{tgt_lang}" _lowerCAmelCase = f"\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn't support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR's WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n" os.makedirs(lowerCAmelCase , exist_ok=lowerCAmelCase ) _lowerCAmelCase = os.path.join(lowerCAmelCase , """README.md""" ) print(f"Generating {path}" ) with open(lowerCAmelCase , """w""" , encoding="""utf-8""" ) as f: f.write(lowerCAmelCase ) # make sure we are under the root of the project A__ : Optional[int] =Path(__file__).resolve().parent.parent.parent A__ : Union[str, Any] =repo_dir / '''model_cards''' for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]: A__ , A__ , A__ : Optional[Any] =model_name.split('''-''') A__ : List[str] =model_cards_dir / '''facebook''' / model_name write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
220
1
import doctest import glob import importlib import inspect import os import re from contextlib import contextmanager from functools import wraps from unittest.mock import patch import numpy as np import pytest from absl.testing import parameterized import datasets from datasets import load_metric from .utils import for_all_test_methods, local, slow # mark all tests as integration _lowerCamelCase =pytest.mark.integration _lowerCamelCase ={"comet"} _lowerCamelCase =importlib.util.find_spec("fairseq") is not None _lowerCamelCase ={"code_eval"} _lowerCamelCase =os.name == "nt" _lowerCamelCase ={"bertscore", "frugalscore", "perplexity"} _lowerCamelCase =importlib.util.find_spec("transformers") is not None def snake_case__ ( lowerCAmelCase_ ): """simple docstring""" @wraps(lowerCAmelCase_ ) def wrapper(self, lowerCAmelCase_ ): if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ: self.skipTest('"test requires Fairseq"' ) else: test_case(self, lowerCAmelCase_ ) return wrapper def snake_case__ ( lowerCAmelCase_ ): """simple docstring""" @wraps(lowerCAmelCase_ ) def wrapper(self, lowerCAmelCase_ ): if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS: self.skipTest('"test requires transformers"' ) else: test_case(self, lowerCAmelCase_ ) return wrapper def snake_case__ ( lowerCAmelCase_ ): """simple docstring""" @wraps(lowerCAmelCase_ ) def wrapper(self, lowerCAmelCase_ ): if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS: self.skipTest('"test not supported on Windows"' ) else: test_case(self, lowerCAmelCase_ ) return wrapper def snake_case__ ( ): """simple docstring""" SCREAMING_SNAKE_CASE =[metric_dir.split(os.sep )[-2] for metric_dir in glob.glob('./metrics/*/' )] return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"] # gleu is unfinished @parameterized.named_parameters(get_local_metric_names() ) @for_all_test_methods( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) @local class a_ ( parameterized.TestCase ): """simple docstring""" __UpperCAmelCase = {} __UpperCAmelCase = None @pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning' ) @pytest.mark.filterwarnings('ignore:load_metric is deprecated:FutureWarning' ) def _lowerCAmelCase ( self : Union[str, Any] ,snake_case : Optional[Any] ): SCREAMING_SNAKE_CASE ='[...]' SCREAMING_SNAKE_CASE =importlib.import_module( datasets.load.metric_module_factory(os.path.join('metrics' ,snake_case ) ).module_path ) SCREAMING_SNAKE_CASE =datasets.load.import_main_class(metric_module.__name__ ,dataset=snake_case ) # check parameters SCREAMING_SNAKE_CASE =inspect.signature(metric._compute ).parameters self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values() ) ) # no **kwargs # run doctest with self.patch_intensive_calls(snake_case ,metric_module.__name__ ): with self.use_local_metrics(): try: SCREAMING_SNAKE_CASE =doctest.testmod(snake_case ,verbose=snake_case ,raise_on_error=snake_case ) except doctest.UnexpectedException as e: raise e.exc_info[1] # raise the exception that doctest caught self.assertEqual(results.failed ,0 ) self.assertGreater(results.attempted ,1 ) @slow def _lowerCAmelCase ( self : Optional[int] ,snake_case : List[Any] ): SCREAMING_SNAKE_CASE ='[...]' SCREAMING_SNAKE_CASE =importlib.import_module( datasets.load.metric_module_factory(os.path.join('metrics' ,snake_case ) ).module_path ) # run doctest with self.use_local_metrics(): SCREAMING_SNAKE_CASE =doctest.testmod(snake_case ,verbose=snake_case ,raise_on_error=snake_case ) self.assertEqual(results.failed ,0 ) self.assertGreater(results.attempted ,1 ) @contextmanager def _lowerCAmelCase ( self : List[str] ,snake_case : int ,snake_case : Optional[int] ): if metric_name in self.INTENSIVE_CALLS_PATCHER: with self.INTENSIVE_CALLS_PATCHER[metric_name](snake_case ): yield else: yield @contextmanager def _lowerCAmelCase ( self : List[Any] ): def load_local_metric(snake_case : int ,*snake_case : Union[str, Any] ,**snake_case : List[str] ): return load_metric(os.path.join('metrics' ,snake_case ) ,*snake_case ,**snake_case ) with patch('datasets.load_metric' ) as mock_load_metric: SCREAMING_SNAKE_CASE =load_local_metric yield @classmethod def _lowerCAmelCase ( cls : Tuple ,snake_case : str ): def wrapper(snake_case : List[Any] ): SCREAMING_SNAKE_CASE =contextmanager(snake_case ) SCREAMING_SNAKE_CASE =patcher return patcher return wrapper @LocalMetricTest.register_intensive_calls_patcher('bleurt' ) def snake_case__ ( lowerCAmelCase_ ): """simple docstring""" import tensorflow.compat.va as tf from bleurt.score import Predictor tf.flags.DEFINE_string('sv', '', '' ) # handle pytest cli flags class a_ ( lowerCamelCase_ ): """simple docstring""" def _lowerCAmelCase ( self : List[Any] ,snake_case : Dict ): assert len(input_dict['input_ids'] ) == 2 return np.array([1.03, 1.04] ) # mock predict_fn which is supposed to do a forward pass with a bleurt model with patch('bleurt.score._create_predictor' ) as mock_create_predictor: SCREAMING_SNAKE_CASE =MockedPredictor() yield @LocalMetricTest.register_intensive_calls_patcher('bertscore' ) def snake_case__ ( lowerCAmelCase_ ): """simple docstring""" import torch def bert_cos_score_idf(lowerCAmelCase_, lowerCAmelCase_, *lowerCAmelCase_, **lowerCAmelCase_ ): return torch.tensor([[1.0, 1.0, 1.0]] * len(lowerCAmelCase_ ) ) # mock get_model which is supposed to do download a bert model # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model with patch('bert_score.scorer.get_model' ), patch( 'bert_score.scorer.bert_cos_score_idf' ) as mock_bert_cos_score_idf: SCREAMING_SNAKE_CASE =bert_cos_score_idf yield @LocalMetricTest.register_intensive_calls_patcher('comet' ) def snake_case__ ( lowerCAmelCase_ ): """simple docstring""" def load_from_checkpoint(lowerCAmelCase_ ): class a_ : """simple docstring""" def _lowerCAmelCase ( self : str ,snake_case : Any ,*snake_case : List[Any] ,**snake_case : List[Any] ): assert len(snake_case ) == 2 SCREAMING_SNAKE_CASE =[0.19, 0.92] return scores, sum(snake_case ) / len(snake_case ) return Model() # mock load_from_checkpoint which is supposed to do download a bert model # mock load_from_checkpoint which is supposed to do download a bert model with patch('comet.download_model' ) as mock_download_model: SCREAMING_SNAKE_CASE =None with patch('comet.load_from_checkpoint' ) as mock_load_from_checkpoint: SCREAMING_SNAKE_CASE =load_from_checkpoint yield def snake_case__ ( ): """simple docstring""" SCREAMING_SNAKE_CASE =load_metric(os.path.join('metrics', 'seqeval' ) ) SCREAMING_SNAKE_CASE ='ERROR' SCREAMING_SNAKE_CASE =F'Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}' with pytest.raises(lowerCAmelCase_, match=re.escape(lowerCAmelCase_ ) ): metric.compute(predictions=[], references=[], scheme=lowerCAmelCase_ )
334
from __future__ import annotations def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE =sorted(numsa + numsa ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =divmod(len(lowerCAmelCase_ ), 2 ) if mod == 1: return all_numbers[div] else: return (all_numbers[div] + all_numbers[div - 1]) / 2 if __name__ == "__main__": import doctest doctest.testmod() _lowerCamelCase =[float(x) for x in input("Enter the elements of first array: ").split()] _lowerCamelCase =[float(x) for x in input("Enter the elements of second array: ").split()] print(f'The median of two arrays is: {median_of_two_arrays(array_a, array_a)}')
334
1
'''simple docstring''' def _snake_case ( _SCREAMING_SNAKE_CASE : int = 10**12 ) -> int: """simple docstring""" lowerCAmelCase = 1 lowerCAmelCase = 0 lowerCAmelCase = 1 lowerCAmelCase = 1 while numerator <= 2 * min_total - 1: prev_numerator += 2 * numerator numerator += 2 * prev_numerator prev_denominator += 2 * denominator denominator += 2 * prev_denominator return (denominator + 1) // 2 if __name__ == "__main__": print(F'''{solution() = }''')
187
'''simple docstring''' import os from argparse import ArgumentParser, Namespace from ..data import SingleSentenceClassificationProcessor as Processor from ..pipelines import TextClassificationPipeline from ..utils import is_tf_available, is_torch_available, logging from . import BaseTransformersCLICommand if not is_tf_available() and not is_torch_available(): raise RuntimeError('At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training') # TF training parameters UpperCAmelCase = False UpperCAmelCase = False def _snake_case ( _SCREAMING_SNAKE_CASE : Namespace ) -> Tuple: """simple docstring""" return TrainCommand(_SCREAMING_SNAKE_CASE ) class __snake_case( _lowerCAmelCase ): '''simple docstring''' @staticmethod def __snake_case ( A_ ) -> Optional[int]: lowerCAmelCase = parser.add_parser("""train""" , help="""CLI tool to train a model on a task.""" ) train_parser.add_argument( """--train_data""" , type=A_ , required=A_ , help="""path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.""" , ) train_parser.add_argument( """--column_label""" , type=A_ , default=0 , help="""Column of the dataset csv file with example labels.""" ) train_parser.add_argument( """--column_text""" , type=A_ , default=1 , help="""Column of the dataset csv file with example texts.""" ) train_parser.add_argument( """--column_id""" , type=A_ , default=2 , help="""Column of the dataset csv file with example ids.""" ) train_parser.add_argument( """--skip_first_row""" , action="""store_true""" , help="""Skip the first row of the csv file (headers).""" ) train_parser.add_argument("""--validation_data""" , type=A_ , default="""""" , help="""path to validation dataset.""" ) train_parser.add_argument( """--validation_split""" , type=A_ , default=0.1 , help="""if validation dataset is not provided, fraction of train dataset to use as validation dataset.""" , ) train_parser.add_argument("""--output""" , type=A_ , default="""./""" , help="""path to saved the trained model.""" ) train_parser.add_argument( """--task""" , type=A_ , default="""text_classification""" , help="""Task to train the model on.""" ) train_parser.add_argument( """--model""" , type=A_ , default="""bert-base-uncased""" , help="""Model's name or path to stored model.""" ) train_parser.add_argument("""--train_batch_size""" , type=A_ , default=32 , help="""Batch size for training.""" ) train_parser.add_argument("""--valid_batch_size""" , type=A_ , default=64 , help="""Batch size for validation.""" ) train_parser.add_argument("""--learning_rate""" , type=A_ , default=3e-5 , help="""Learning rate.""" ) train_parser.add_argument("""--adam_epsilon""" , type=A_ , default=1e-08 , help="""Epsilon for Adam optimizer.""" ) train_parser.set_defaults(func=A_ ) def __init__( self , A_ ) -> Tuple: lowerCAmelCase = logging.get_logger("""transformers-cli/training""" ) lowerCAmelCase = """tf""" if is_tf_available() else """torch""" os.makedirs(args.output , exist_ok=A_ ) lowerCAmelCase = args.output lowerCAmelCase = args.column_label lowerCAmelCase = args.column_text lowerCAmelCase = args.column_id self.logger.info(f'Loading {args.task} pipeline for {args.model}' ) if args.task == "text_classification": lowerCAmelCase = TextClassificationPipeline.from_pretrained(args.model ) elif args.task == "token_classification": raise NotImplementedError elif args.task == "question_answering": raise NotImplementedError self.logger.info(f'Loading dataset from {args.train_data}' ) lowerCAmelCase = Processor.create_from_csv( args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , ) lowerCAmelCase = None if args.validation_data: self.logger.info(f'Loading validation dataset from {args.validation_data}' ) lowerCAmelCase = Processor.create_from_csv( args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , ) lowerCAmelCase = args.validation_split lowerCAmelCase = args.train_batch_size lowerCAmelCase = args.valid_batch_size lowerCAmelCase = args.learning_rate lowerCAmelCase = args.adam_epsilon def __snake_case ( self ) -> Optional[int]: if self.framework == "tf": return self.run_tf() return self.run_torch() def __snake_case ( self ) -> Tuple: raise NotImplementedError def __snake_case ( self ) -> Tuple: self.pipeline.fit( self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , ) # Save trained pipeline self.pipeline.save_pretrained(self.output )
187
1
import unittest from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class __snake_case : @staticmethod def _SCREAMING_SNAKE_CASE ( *snake_case ,**snake_case ): '''simple docstring''' pass @is_pipeline_test @require_torch @require_vision class __snake_case ( unittest.TestCase ): _a : str= MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case ,snake_case ): '''simple docstring''' lowercase : Optional[Any] = pipeline("""visual-question-answering""" ,model="""hf-internal-testing/tiny-vilt-random-vqa""" ) lowercase : Any = [ { """image""": Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ), """question""": """How many cats are there?""", }, { """image""": """./tests/fixtures/tests_samples/COCO/000000039769.png""", """question""": """How many cats are there?""", }, ] return vqa_pipeline, examples def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case ): '''simple docstring''' lowercase : str = vqa_pipeline(snake_case ,top_k=1 ) self.assertEqual( snake_case ,[ [{"""score""": ANY(snake_case ), """answer""": ANY(snake_case )}], [{"""score""": ANY(snake_case ), """answer""": ANY(snake_case )}], ] ,) @require_torch def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' lowercase : Union[str, Any] = pipeline("""visual-question-answering""" ,model="""hf-internal-testing/tiny-vilt-random-vqa""" ) lowercase : Union[str, Any] = """./tests/fixtures/tests_samples/COCO/000000039769.png""" lowercase : int = """How many cats are there?""" lowercase : str = vqa_pipeline(image=snake_case ,question="""How many cats are there?""" ,top_k=2 ) self.assertEqual( snake_case ,[{"""score""": ANY(snake_case ), """answer""": ANY(snake_case )}, {"""score""": ANY(snake_case ), """answer""": ANY(snake_case )}] ) lowercase : List[str] = vqa_pipeline({"""image""": image, """question""": question} ,top_k=2 ) self.assertEqual( snake_case ,[{"""score""": ANY(snake_case ), """answer""": ANY(snake_case )}, {"""score""": ANY(snake_case ), """answer""": ANY(snake_case )}] ) @slow @require_torch def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' lowercase : Union[str, Any] = pipeline("""visual-question-answering""" ,model="""dandelin/vilt-b32-finetuned-vqa""" ) lowercase : Tuple = """./tests/fixtures/tests_samples/COCO/000000039769.png""" lowercase : Optional[int] = """How many cats are there?""" lowercase : Any = vqa_pipeline(image=snake_case ,question=snake_case ,top_k=2 ) self.assertEqual( nested_simplify(snake_case ,decimals=4 ) ,[{"""score""": 0.8_799, """answer""": """2"""}, {"""score""": 0.296, """answer""": """1"""}] ) lowercase : str = vqa_pipeline({"""image""": image, """question""": question} ,top_k=2 ) self.assertEqual( nested_simplify(snake_case ,decimals=4 ) ,[{"""score""": 0.8_799, """answer""": """2"""}, {"""score""": 0.296, """answer""": """1"""}] ) lowercase : Any = vqa_pipeline( [{"""image""": image, """question""": question}, {"""image""": image, """question""": question}] ,top_k=2 ) self.assertEqual( nested_simplify(snake_case ,decimals=4 ) ,[[{"""score""": 0.8_799, """answer""": """2"""}, {"""score""": 0.296, """answer""": """1"""}]] * 2 ,) @require_tf @unittest.skip("""Visual question answering not implemented in TF""" ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' pass
20
from collections import UserDict from typing import Union import numpy as np import requests from ..utils import ( add_end_docstrings, logging, ) from .audio_classification import ffmpeg_read from .base import PIPELINE_INIT_ARGS, Pipeline lowercase : Optional[Any] = logging.get_logger(__name__) @add_end_docstrings(lowerCAmelCase ) class __snake_case ( lowerCAmelCase ): def __init__( self ,**snake_case ): '''simple docstring''' super().__init__(**snake_case ) if self.framework != "pt": raise ValueError(f"The {self.__class__} is only available in PyTorch." ) # No specific FOR_XXX available yet def __call__( self ,snake_case ,**snake_case ): '''simple docstring''' return super().__call__(snake_case ,**snake_case ) def _SCREAMING_SNAKE_CASE ( self ,**snake_case ): '''simple docstring''' lowercase : Union[str, Any] = {} if "candidate_labels" in kwargs: lowercase : List[str] = kwargs["""candidate_labels"""] if "hypothesis_template" in kwargs: lowercase : Dict = kwargs["""hypothesis_template"""] return preprocess_params, {}, {} def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case=None ,snake_case="This is a sound of {}." ): '''simple docstring''' if isinstance(snake_case ,snake_case ): if audio.startswith("""http://""" ) or audio.startswith("""https://""" ): # We need to actually check for a real protocol, otherwise it's impossible to use a local file # like http_huggingface_co.png lowercase : Optional[Any] = requests.get(snake_case ).content else: with open(snake_case ,"""rb""" ) as f: lowercase : Union[str, Any] = f.read() if isinstance(snake_case ,snake_case ): lowercase : int = ffmpeg_read(snake_case ,self.feature_extractor.sampling_rate ) if not isinstance(snake_case ,np.ndarray ): raise ValueError("""We expect a numpy ndarray as input""" ) if len(audio.shape ) != 1: raise ValueError("""We expect a single channel audio input for ZeroShotAudioClassificationPipeline""" ) lowercase : Dict = self.feature_extractor( [audio] ,sampling_rate=self.feature_extractor.sampling_rate ,return_tensors="""pt""" ) lowercase : Tuple = candidate_labels lowercase : Tuple = [hypothesis_template.format(snake_case ) for x in candidate_labels] lowercase : Optional[Any] = self.tokenizer(snake_case ,return_tensors=self.framework ,padding=snake_case ) lowercase : Optional[Any] = [text_inputs] return inputs def _SCREAMING_SNAKE_CASE ( self ,snake_case ): '''simple docstring''' lowercase : List[str] = model_inputs.pop("""candidate_labels""" ) lowercase : Dict = model_inputs.pop("""text_inputs""" ) if isinstance(text_inputs[0] ,snake_case ): lowercase : List[Any] = text_inputs[0] else: # Batching case. lowercase : Dict = text_inputs[0][0] lowercase : Optional[Any] = self.model(**snake_case ,**snake_case ) lowercase : Any = { """candidate_labels""": candidate_labels, """logits""": outputs.logits_per_audio, } return model_outputs def _SCREAMING_SNAKE_CASE ( self ,snake_case ): '''simple docstring''' lowercase : List[Any] = model_outputs.pop("""candidate_labels""" ) lowercase : Any = model_outputs["""logits"""][0] if self.framework == "pt": lowercase : Any = logits.softmax(dim=0 ) lowercase : Tuple = probs.tolist() else: raise ValueError("""`tf` framework not supported.""" ) lowercase : Tuple = [ {"""score""": score, """label""": candidate_label} for score, candidate_label in sorted(zip(snake_case ,snake_case ) ,key=lambda snake_case : -x[0] ) ] return result
20
1
def SCREAMING_SNAKE_CASE__ ( __a , __a ): if a < 0 or b < 0: raise ValueError('the value of both inputs must be positive' ) snake_case_ : Optional[Any] = str(bin(__a ) )[2:] # remove the leading "0b" snake_case_ : Union[str, Any] = str(bin(__a ) )[2:] # remove the leading "0b" snake_case_ : Optional[Any] = max(len(__a ) , len(__a ) ) return "0b" + "".join( str(int(char_a == '1' and char_b == '1' ) ) for char_a, char_b in zip(a_binary.zfill(__a ) , b_binary.zfill(__a ) ) ) if __name__ == "__main__": import doctest doctest.testmod()
360
from itertools import product from cva import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey from numpy import dot, exp, mgrid, pi, ravel, square, uinta, zeros def SCREAMING_SNAKE_CASE__ ( __a , __a ): snake_case_ : List[Any] = k_size // 2 snake_case_ ,snake_case_ : Any = mgrid[0 - center : k_size - center, 0 - center : k_size - center] snake_case_ : Optional[Any] = 1 / (2 * pi * sigma) * exp(-(square(__a ) + square(__a )) / (2 * square(__a )) ) return g def SCREAMING_SNAKE_CASE__ ( __a , __a , __a ): snake_case_ ,snake_case_ : Union[str, Any] = image.shape[0], image.shape[1] # dst image height and width snake_case_ : int = height - k_size + 1 snake_case_ : Optional[int] = width - k_size + 1 # im2col, turn the k_size*k_size pixels into a row and np.vstack all rows snake_case_ : Optional[Any] = zeros((dst_height * dst_width, k_size * k_size) ) snake_case_ : Tuple = 0 for i, j in product(range(__a ) , range(__a ) ): snake_case_ : Optional[int] = ravel(image[i : i + k_size, j : j + k_size] ) snake_case_ : str = window row += 1 # turn the kernel into shape(k*k, 1) snake_case_ : List[Any] = gen_gaussian_kernel(__a , __a ) snake_case_ : str = ravel(__a ) # reshape and get the dst image snake_case_ : Optional[int] = dot(__a , __a ).reshape(__a , __a ).astype(__a ) return dst if __name__ == "__main__": # read original image _SCREAMING_SNAKE_CASE = imread(R"""../image_data/lena.jpg""") # turn image in gray scale value _SCREAMING_SNAKE_CASE = cvtColor(img, COLOR_BGR2GRAY) # get values with two different mask size _SCREAMING_SNAKE_CASE = gaussian_filter(gray, 3, sigma=1) _SCREAMING_SNAKE_CASE = gaussian_filter(gray, 5, sigma=0.8) # show result images imshow("""gaussian filter with 3x3 mask""", gaussianaxa) imshow("""gaussian filter with 5x5 mask""", gaussianaxa) waitKey()
88
0