code
stringlengths
82
53.2k
code_codestyle
int64
0
721
style_context
stringlengths
91
41.9k
style_context_codestyle
int64
0
699
label
int64
0
1
import argparse import os import torch from transformers.utils import WEIGHTS_NAME SCREAMING_SNAKE_CASE__ : Any = ["""small""", """medium""", """large"""] SCREAMING_SNAKE_CASE__ : str = """lm_head.decoder.weight""" SCREAMING_SNAKE_CASE__ : str = """lm_head.weight""" def _A ( lowerCamelCase , lowerCamelCase ): a__ : Union[str, Any] = torch.load(lowerCamelCase ) a__ : List[Any] = d.pop(lowerCamelCase ) os.makedirs(lowerCamelCase , exist_ok=lowerCamelCase ) torch.save(lowerCamelCase , os.path.join(lowerCamelCase , lowerCamelCase ) ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ : List[str] = argparse.ArgumentParser() parser.add_argument("""--dialogpt_path""", default=""".""", type=str) SCREAMING_SNAKE_CASE__ : int = parser.parse_args() for MODEL in DIALOGPT_MODELS: SCREAMING_SNAKE_CASE__ : str = os.path.join(args.dialogpt_path, f'{MODEL}_ft.pkl') SCREAMING_SNAKE_CASE__ : str = f'./DialoGPT-{MODEL}' convert_dialogpt_checkpoint( checkpoint_path, pytorch_dump_folder_path, )
112
from packaging import version from .import_utils import is_accelerate_available if is_accelerate_available(): import accelerate def _A ( lowerCamelCase ): if not is_accelerate_available(): return method a__ : List[Any] = version.parse(accelerate.__version__ ).base_version if version.parse(lowerCamelCase ) < version.parse("0.17.0" ): return method def wrapper(self , *lowerCamelCase , **lowerCamelCase ): if hasattr(self , "_hf_hook" ) and hasattr(self._hf_hook , "pre_forward" ): self._hf_hook.pre_forward(self ) return method(self , *lowerCamelCase , **lowerCamelCase ) return wrapper
112
1
import random import unittest import torch from diffusers import IFImgaImgSuperResolutionPipeline from diffusers.utils import floats_tensor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import skip_mps, torch_device from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin from . import IFPipelineTesterMixin @skip_mps class A_ (a_ , a_ , unittest.TestCase ): """simple docstring""" a__ = IFImgaImgSuperResolutionPipeline a__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''width''', '''height'''} a__ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''original_image'''} ) a__ = PipelineTesterMixin.required_optional_params - {'''latents'''} def _A ( self :Tuple ) -> List[Any]: '''simple docstring''' return self._get_superresolution_dummy_components() def _A ( self :Union[str, Any] , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :List[Any]=0 ) -> str: '''simple docstring''' if str(lowerCAmelCase__ ).startswith("mps" ): snake_case_ : Union[str, Any] = torch.manual_seed(lowerCAmelCase__ ) else: snake_case_ : Optional[Any] = torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ ) snake_case_ : Tuple = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowerCAmelCase__ ) ).to(lowerCAmelCase__ ) snake_case_ : List[Any] = floats_tensor((1, 3, 16, 16) , rng=random.Random(lowerCAmelCase__ ) ).to(lowerCAmelCase__ ) snake_case_ : str = { "prompt": "A painting of a squirrel eating a burger", "image": image, "original_image": original_image, "generator": generator, "num_inference_steps": 2, "output_type": "numpy", } return inputs @unittest.skipIf( torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , ) def _A ( self :Tuple ) -> List[Any]: '''simple docstring''' self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) def _A ( self :str ) -> Tuple: '''simple docstring''' self._test_save_load_optional_components() @unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" ) def _A ( self :Optional[Any] ) -> Optional[Any]: '''simple docstring''' super().test_save_load_floataa(expected_max_diff=1E-1 ) def _A ( self :Dict ) -> Union[str, Any]: '''simple docstring''' self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 ) def _A ( self :Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' self._test_save_load_local() def _A ( self :Dict ) -> Tuple: '''simple docstring''' self._test_inference_batch_single_identical( expected_max_diff=1E-2 , )
704
'''simple docstring''' import inspect import re from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_config_docstrings.py __lowerCamelCase : Any = '''src/transformers''' # This is to make sure the transformers module imported is the one in the repo. __lowerCamelCase : List[str] = direct_transformers_import(PATH_TO_TRANSFORMERS) __lowerCamelCase : Optional[Any] = transformers.models.auto.configuration_auto.CONFIG_MAPPING # Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`. # For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)` __lowerCamelCase : Union[str, Any] = re.compile(R'''\[(.+?)\]\((https://huggingface\.co/.+?)\)''') __lowerCamelCase : Any = { '''DecisionTransformerConfig''', '''EncoderDecoderConfig''', '''MusicgenConfig''', '''RagConfig''', '''SpeechEncoderDecoderConfig''', '''TimmBackboneConfig''', '''VisionEncoderDecoderConfig''', '''VisionTextDualEncoderConfig''', '''LlamaConfig''', } def __UpperCAmelCase ( __magic_name__ )-> List[Any]: """simple docstring""" snake_case_ : Tuple = None # source code of `config_class` snake_case_ : List[Any] = inspect.getsource(__magic_name__ ) snake_case_ : List[str] = _re_checkpoint.findall(__magic_name__ ) # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link. # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')` for ckpt_name, ckpt_link in checkpoints: # allow the link to end with `/` if ckpt_link.endswith("/" ): snake_case_ : Optional[Any] = ckpt_link[:-1] # verify the checkpoint name corresponds to the checkpoint link snake_case_ : str = F'''https://huggingface.co/{ckpt_name}''' if ckpt_link == ckpt_link_from_name: snake_case_ : Dict = ckpt_name break return checkpoint def __UpperCAmelCase ( )-> Dict: """simple docstring""" snake_case_ : Optional[int] = [] for config_class in list(CONFIG_MAPPING.values() ): # Skip deprecated models if "models.deprecated" in config_class.__module__: continue snake_case_ : str = get_checkpoint_from_config_class(__magic_name__ ) snake_case_ : Union[str, Any] = config_class.__name__ if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK: configs_without_checkpoint.append(__magic_name__ ) if len(__magic_name__ ) > 0: snake_case_ : Tuple = "\n".join(sorted(__magic_name__ ) ) raise ValueError(F'''The following configurations don\'t contain any valid checkpoint:\n{message}''' ) if __name__ == "__main__": check_config_docstrings_have_checkpoints()
656
0
from ...configuration_utils import PretrainedConfig from ...utils import logging a_ : int = logging.get_logger(__name__) a_ : Union[str, Any] = { 'SCUT-DLVCLab/lilt-roberta-en-base': ( 'https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json' ), } class _snake_case ( A__ ): _lowercase : Optional[int] = '''lilt''' def __init__( self , a=3_0522 , a=768 , a=12 , a=12 , a=3072 , a="gelu" , a=0.1 , a=0.1 , a=512 , a=2 , a=0.02 , a=1E-12 , a=0 , a="absolute" , a=None , a=4 , a=1024 , **a , ) -> Any: super().__init__(pad_token_id=a , **a) SCREAMING_SNAKE_CASE = vocab_size SCREAMING_SNAKE_CASE = hidden_size SCREAMING_SNAKE_CASE = num_hidden_layers SCREAMING_SNAKE_CASE = num_attention_heads SCREAMING_SNAKE_CASE = hidden_act SCREAMING_SNAKE_CASE = intermediate_size SCREAMING_SNAKE_CASE = hidden_dropout_prob SCREAMING_SNAKE_CASE = attention_probs_dropout_prob SCREAMING_SNAKE_CASE = max_position_embeddings SCREAMING_SNAKE_CASE = type_vocab_size SCREAMING_SNAKE_CASE = initializer_range SCREAMING_SNAKE_CASE = layer_norm_eps SCREAMING_SNAKE_CASE = position_embedding_type SCREAMING_SNAKE_CASE = classifier_dropout SCREAMING_SNAKE_CASE = channel_shrink_ratio SCREAMING_SNAKE_CASE = max_ad_position_embeddings
73
import heapq as hq import math from collections.abc import Iterator class _snake_case : def __init__( self , a) -> Optional[Any]: SCREAMING_SNAKE_CASE = str(id_) SCREAMING_SNAKE_CASE = None SCREAMING_SNAKE_CASE = None SCREAMING_SNAKE_CASE = [] SCREAMING_SNAKE_CASE = {} # {vertex:distance} def __lt__( self , a) -> Dict: return self.key < other.key def __repr__( self) -> Optional[Any]: return self.id def SCREAMING_SNAKE_CASE__ ( self , a) -> Optional[Any]: self.neighbors.append(a) def SCREAMING_SNAKE_CASE__ ( self , a , a) -> Tuple: SCREAMING_SNAKE_CASE = weight def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase): # add the neighbors: graph[a - 1].add_neighbor(graph[b - 1]) graph[b - 1].add_neighbor(graph[a - 1]) # add the edges: graph[a - 1].add_edge(graph[b - 1] , _UpperCAmelCase) graph[b - 1].add_edge(graph[a - 1] , _UpperCAmelCase) def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase): SCREAMING_SNAKE_CASE = [] for u in graph: SCREAMING_SNAKE_CASE = math.inf SCREAMING_SNAKE_CASE = None SCREAMING_SNAKE_CASE = 0 SCREAMING_SNAKE_CASE = graph[:] while q: SCREAMING_SNAKE_CASE = min(_UpperCAmelCase) q.remove(_UpperCAmelCase) for v in u.neighbors: if (v in q) and (u.edges[v.id] < v.key): SCREAMING_SNAKE_CASE = u SCREAMING_SNAKE_CASE = u.edges[v.id] for i in range(1 , len(_UpperCAmelCase)): a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1)) return a def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase): for u in graph: SCREAMING_SNAKE_CASE = math.inf SCREAMING_SNAKE_CASE = None SCREAMING_SNAKE_CASE = 0 SCREAMING_SNAKE_CASE = list(_UpperCAmelCase) hq.heapify(_UpperCAmelCase) while h: SCREAMING_SNAKE_CASE = hq.heappop(_UpperCAmelCase) for v in u.neighbors: if (v in h) and (u.edges[v.id] < v.key): SCREAMING_SNAKE_CASE = u SCREAMING_SNAKE_CASE = u.edges[v.id] hq.heapify(_UpperCAmelCase) for i in range(1 , len(_UpperCAmelCase)): yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1) def lowerCamelCase__ (): pass if __name__ == "__main__": import doctest doctest.testmod()
73
1
from __future__ import annotations from math import gcd def snake_case( __magic_name__ , __magic_name__ = 2 , __magic_name__ = 1 , __magic_name__ = 3 , ) -> int | None: '''simple docstring''' if num < 2: raise ValueError('''The input value cannot be less than 2''' ) # Because of the relationship between ``f(f(x))`` and ``f(x)``, this # algorithm struggles to find factors that are divisible by two. # As a workaround, we specifically check for two and even inputs. # See: https://math.stackexchange.com/a/2856214/165820 if num > 2 and num % 2 == 0: return 2 # Pollard's Rho algorithm requires a function that returns pseudorandom # values between 0 <= X < ``num``. It doesn't need to be random in the # sense that the output value is cryptographically secure or difficult # to calculate, it only needs to be random in the sense that all output # values should be equally likely to appear. # For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num`` # However, the success of Pollard's algorithm isn't guaranteed and is # determined in part by the initial seed and the chosen random function. # To make retries easier, we will instead use ``f(x) = (x**2 + C) % num`` # where ``C`` is a value that we can modify between each attempt. def rand_fn(__magic_name__ , __magic_name__ , __magic_name__ ) -> int: return (pow(__magic_name__ , 2 ) + step) % modulus for _ in range(__magic_name__ ): # These track the position within the cycle detection logic. lowercase : Tuple = seed lowercase : Tuple = seed while True: # At each iteration, the tortoise moves one step and the hare moves two. lowercase : Any = rand_fn(__magic_name__ , __magic_name__ , __magic_name__ ) lowercase : List[Any] = rand_fn(__magic_name__ , __magic_name__ , __magic_name__ ) lowercase : Tuple = rand_fn(__magic_name__ , __magic_name__ , __magic_name__ ) # At some point both the tortoise and the hare will enter a cycle whose # length ``p`` is a divisor of ``num``. Once in that cycle, at some point # the tortoise and hare will end up on the same value modulo ``p``. # We can detect when this happens because the position difference between # the tortoise and the hare will share a common divisor with ``num``. lowercase : Optional[Any] = gcd(hare - tortoise , __magic_name__ ) if divisor == 1: # No common divisor yet, just keep searching. continue else: # We found a common divisor! if divisor == num: # Unfortunately, the divisor is ``num`` itself and is useless. break else: # The divisor is a nontrivial factor of ``num``! return divisor # If we made it here, then this attempt failed. # We need to pick a new starting seed for the tortoise and hare # in addition to a new step value for the random function. # To keep this example implementation deterministic, the # new values will be generated based on currently available # values instead of using something like ``random.randint``. # We can use the hare's position as the new seed. # This is actually what Richard Brent's the "optimized" variant does. lowercase : List[Any] = hare # The new step value for the random function can just be incremented. # At first the results will be similar to what the old function would # have produced, but the value will quickly diverge after a bit. step += 1 # We haven't found a divisor within the requested number of attempts. # We were unlucky or ``num`` itself is actually prime. return None if __name__ == "__main__": import argparse lowerCAmelCase_ = argparse.ArgumentParser() parser.add_argument( 'num', type=int, help='The value to find a divisor of', ) parser.add_argument( '--attempts', type=int, default=3, help='The number of attempts before giving up', ) lowerCAmelCase_ = parser.parse_args() lowerCAmelCase_ = pollard_rho(args.num, attempts=args.attempts) if divisor is None: print(f'''{args.num} is probably prime''') else: lowerCAmelCase_ = args.num // divisor print(f'''{args.num} = {divisor} * {quotient}''')
596
from typing import Callable, Optional from .. import Features from ..packaged_modules.generator.generator import Generator from .abc import AbstractDatasetInputStream class _A ( _lowerCamelCase ): def __init__( self : Optional[Any] , _A : Callable , _A : Optional[Features] = None , _A : str = None , _A : bool = False , _A : bool = False , _A : Optional[dict] = None , _A : Optional[int] = None , **_A : List[str] , ) -> Union[str, Any]: """simple docstring""" super().__init__( features=_A , cache_dir=_A , keep_in_memory=_A , streaming=_A , num_proc=_A , **_A , ) lowercase : int = Generator( cache_dir=_A , features=_A , generator=_A , gen_kwargs=_A , **_A , ) def __a ( self : Optional[int] ) -> Tuple: """simple docstring""" if self.streaming: lowercase : int = self.builder.as_streaming_dataset(split='''train''' ) # Build regular (map-style) dataset else: lowercase : Union[str, Any] = None lowercase : str = None lowercase : Union[str, Any] = None lowercase : Union[str, Any] = None self.builder.download_and_prepare( download_config=_A , download_mode=_A , verification_mode=_A , base_path=_A , num_proc=self.num_proc , ) lowercase : Optional[Any] = self.builder.as_dataset( split='''train''' , verification_mode=_A , in_memory=self.keep_in_memory ) return dataset
596
1
'''simple docstring''' import argparse import json from typing import List from ltp import LTP from transformers.models.bert.tokenization_bert import BertTokenizer def A__ ( __lowerCAmelCase : Any ): # This defines a "chinese character" as anything in the CJK Unicode block: # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) # # Note that the CJK Unicode block is NOT all Japanese and Korean characters, # despite its name. The modern Korean Hangul alphabet is a different block, # as is Japanese Hiragana and Katakana. Those alphabets are used to write # space-separated words, so they are not treated specially and handled # like the all of the other languages. if ( (cp >= 0x4_e_0_0 and cp <= 0x9_f_f_f) or (cp >= 0x3_4_0_0 and cp <= 0x4_d_b_f) # or (cp >= 0x2_0_0_0_0 and cp <= 0x2_a_6_d_f) # or (cp >= 0x2_a_7_0_0 and cp <= 0x2_b_7_3_f) # or (cp >= 0x2_b_7_4_0 and cp <= 0x2_b_8_1_f) # or (cp >= 0x2_b_8_2_0 and cp <= 0x2_c_e_a_f) # or (cp >= 0xf_9_0_0 and cp <= 0xf_a_f_f) or (cp >= 0x2_f_8_0_0 and cp <= 0x2_f_a_1_f) # ): # return True return False def A__ ( __lowerCAmelCase : str ): # word like '180' or '身高' or '神' for char in word: lowerCamelCase__ = ord(__lowerCAmelCase ) if not _is_chinese_char(__lowerCAmelCase ): return 0 return 1 def A__ ( __lowerCAmelCase : List[str] ): lowerCamelCase__ = set() for token in tokens: lowerCamelCase__ = len(__lowerCAmelCase ) > 1 and is_chinese(__lowerCAmelCase ) if chinese_word: word_set.add(__lowerCAmelCase ) lowerCamelCase__ = list(__lowerCAmelCase ) return word_list def A__ ( __lowerCAmelCase : List[str] , __lowerCAmelCase : set() ): if not chinese_word_set: return bert_tokens lowerCamelCase__ = max([len(__lowerCAmelCase ) for w in chinese_word_set] ) lowerCamelCase__ = bert_tokens lowerCamelCase__ , lowerCamelCase__ = 0, len(__lowerCAmelCase ) while start < end: lowerCamelCase__ = True if is_chinese(bert_word[start] ): lowerCamelCase__ = min(end - start , __lowerCAmelCase ) for i in range(__lowerCAmelCase , 1 , -1 ): lowerCamelCase__ = """""".join(bert_word[start : start + i] ) if whole_word in chinese_word_set: for j in range(start + 1 , start + i ): lowerCamelCase__ = """##""" + bert_word[j] lowerCamelCase__ = start + i lowerCamelCase__ = False break if single_word: start += 1 return bert_word def A__ ( __lowerCAmelCase : List[str] , __lowerCAmelCase : LTP , __lowerCAmelCase : BertTokenizer ): lowerCamelCase__ = [] for i in range(0 , len(__lowerCAmelCase ) , 100 ): lowerCamelCase__ = ltp_tokenizer.pipeline(lines[i : i + 100] , tasks=["""cws"""] ).cws lowerCamelCase__ = [get_chinese_word(__lowerCAmelCase ) for r in res] ltp_res.extend(__lowerCAmelCase ) assert len(__lowerCAmelCase ) == len(__lowerCAmelCase ) lowerCamelCase__ = [] for i in range(0 , len(__lowerCAmelCase ) , 100 ): lowerCamelCase__ = bert_tokenizer(lines[i : i + 100] , add_special_tokens=__lowerCAmelCase , truncation=__lowerCAmelCase , max_length=512 ) bert_res.extend(res["""input_ids"""] ) assert len(__lowerCAmelCase ) == len(__lowerCAmelCase ) lowerCamelCase__ = [] for input_ids, chinese_word in zip(__lowerCAmelCase , __lowerCAmelCase ): lowerCamelCase__ = [] for id in input_ids: lowerCamelCase__ = bert_tokenizer._convert_id_to_token(__lowerCAmelCase ) input_tokens.append(__lowerCAmelCase ) lowerCamelCase__ = add_sub_symbol(__lowerCAmelCase , __lowerCAmelCase ) lowerCamelCase__ = [] # We only save pos of chinese subwords start with ##, which mean is part of a whole word. for i, token in enumerate(__lowerCAmelCase ): if token[:2] == "##": lowerCamelCase__ = token[2:] # save chinese tokens' pos if len(__lowerCAmelCase ) == 1 and _is_chinese_char(ord(__lowerCAmelCase ) ): ref_id.append(__lowerCAmelCase ) ref_ids.append(__lowerCAmelCase ) assert len(__lowerCAmelCase ) == len(__lowerCAmelCase ) return ref_ids def A__ ( __lowerCAmelCase : Optional[int] ): # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm) # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp) with open(args.file_name , """r""" , encoding="""utf-8""" ) as f: lowerCamelCase__ = f.readlines() lowerCamelCase__ = [line.strip() for line in data if len(__lowerCAmelCase ) > 0 and not line.isspace()] # avoid delimiter like '\u2029' lowerCamelCase__ = LTP(args.ltp ) # faster in GPU device lowerCamelCase__ = BertTokenizer.from_pretrained(args.bert ) lowerCamelCase__ = prepare_ref(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) with open(args.save_path , """w""" , encoding="""utf-8""" ) as f: lowerCamelCase__ = [json.dumps(__lowerCAmelCase ) + """\n""" for ref in ref_ids] f.writelines(__lowerCAmelCase ) if __name__ == "__main__": UpperCamelCase : Optional[int] = argparse.ArgumentParser(description='prepare_chinese_ref') parser.add_argument( '--file_name', required=False, type=str, default='./resources/chinese-demo.txt', help='file need process, same as training data in lm', ) parser.add_argument( '--ltp', required=False, type=str, default='./resources/ltp', help='resources for LTP tokenizer, usually a path', ) parser.add_argument( '--bert', required=False, type=str, default='./resources/robert', help='resources for Bert tokenizer', ) parser.add_argument( '--save_path', required=False, type=str, default='./resources/ref.txt', help='path to save res', ) UpperCamelCase : Any = parser.parse_args() main(args)
50
'''simple docstring''' import numpy as np from sklearn.datasets import fetch_california_housing from sklearn.metrics import mean_absolute_error, mean_squared_error from sklearn.model_selection import train_test_split from xgboost import XGBRegressor def A__ ( __lowerCAmelCase : dict ): return (data["data"], data["target"]) def A__ ( __lowerCAmelCase : np.ndarray , __lowerCAmelCase : np.ndarray , __lowerCAmelCase : np.ndarray ): lowerCamelCase__ = XGBRegressor(verbosity=0 , random_state=42 ) xgb.fit(__lowerCAmelCase , __lowerCAmelCase ) # Predict target for test data lowerCamelCase__ = xgb.predict(__lowerCAmelCase ) lowerCamelCase__ = predictions.reshape(len(__lowerCAmelCase ) , 1 ) return predictions def A__ ( ): lowerCamelCase__ = fetch_california_housing() lowerCamelCase__ , lowerCamelCase__ = data_handling(__lowerCAmelCase ) lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = train_test_split( __lowerCAmelCase , __lowerCAmelCase , test_size=0.25 , random_state=1 ) lowerCamelCase__ = xgboost(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) # Error printing print(F'''Mean Absolute Error : {mean_absolute_error(__lowerCAmelCase , __lowerCAmelCase )}''' ) print(F'''Mean Square Error : {mean_squared_error(__lowerCAmelCase , __lowerCAmelCase )}''' ) if __name__ == "__main__": import doctest doctest.testmod(verbose=True) main()
50
1
'''simple docstring''' import torch from diffusers import UnCLIPScheduler from .test_schedulers import SchedulerCommonTest class lowerCAmelCase_ ( __magic_name__ ): __lowerCamelCase : Tuple = (UnCLIPScheduler,) def _snake_case ( self , **_lowerCAmelCase ) -> Union[str, Any]: _lowerCAmelCase = { "num_train_timesteps": 1000, "variance_type": "fixed_small_log", "clip_sample": True, "clip_sample_range": 1.0, "prediction_type": "epsilon", } config.update(**_lowerCAmelCase ) return config def _snake_case ( self ) -> List[str]: for timesteps in [1, 5, 100, 1000]: self.check_over_configs(num_train_timesteps=_lowerCAmelCase ) def _snake_case ( self ) -> Any: for variance in ["fixed_small_log", "learned_range"]: self.check_over_configs(variance_type=_lowerCAmelCase ) def _snake_case ( self ) -> Tuple: for clip_sample in [True, False]: self.check_over_configs(clip_sample=_lowerCAmelCase ) def _snake_case ( self ) -> Union[str, Any]: for clip_sample_range in [1, 5, 10, 20]: self.check_over_configs(clip_sample_range=_lowerCAmelCase ) def _snake_case ( self ) -> Dict: for prediction_type in ["epsilon", "sample"]: self.check_over_configs(prediction_type=_lowerCAmelCase ) def _snake_case ( self ) -> Optional[Any]: for time_step in [0, 500, 999]: for prev_timestep in [None, 5, 100, 250, 500, 750]: if prev_timestep is not None and prev_timestep >= time_step: continue self.check_over_forward(time_step=_lowerCAmelCase , prev_timestep=_lowerCAmelCase ) def _snake_case ( self ) -> List[str]: _lowerCAmelCase = self.scheduler_classes[0] _lowerCAmelCase = self.get_scheduler_config(variance_type="fixed_small_log" ) _lowerCAmelCase = scheduler_class(**_lowerCAmelCase ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0000E-10 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0549625 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.9994987 ) ) < 1E-5 def _snake_case ( self ) -> Tuple: _lowerCAmelCase = self.scheduler_classes[0] _lowerCAmelCase = self.get_scheduler_config(variance_type="learned_range" ) _lowerCAmelCase = scheduler_class(**_lowerCAmelCase ) _lowerCAmelCase = 0.5 assert scheduler._get_variance(1 , predicted_variance=_lowerCAmelCase ) - -10.1712790 < 1E-5 assert scheduler._get_variance(487 , predicted_variance=_lowerCAmelCase ) - -5.7998052 < 1E-5 assert scheduler._get_variance(999 , predicted_variance=_lowerCAmelCase ) - -0.0010011 < 1E-5 def _snake_case ( self ) -> Dict: _lowerCAmelCase = self.scheduler_classes[0] _lowerCAmelCase = self.get_scheduler_config() _lowerCAmelCase = scheduler_class(**_lowerCAmelCase ) _lowerCAmelCase = scheduler.timesteps _lowerCAmelCase = self.dummy_model() _lowerCAmelCase = self.dummy_sample_deter _lowerCAmelCase = torch.manual_seed(0 ) for i, t in enumerate(_lowerCAmelCase ): # 1. predict noise residual _lowerCAmelCase = model(_lowerCAmelCase , _lowerCAmelCase ) # 2. predict previous mean of sample x_t-1 _lowerCAmelCase = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , generator=_lowerCAmelCase ).prev_sample _lowerCAmelCase = pred_prev_sample _lowerCAmelCase = torch.sum(torch.abs(_lowerCAmelCase ) ) _lowerCAmelCase = torch.mean(torch.abs(_lowerCAmelCase ) ) assert abs(result_sum.item() - 252.2682495 ) < 1E-2 assert abs(result_mean.item() - 0.3284743 ) < 1E-3 def _snake_case ( self ) -> str: _lowerCAmelCase = self.scheduler_classes[0] _lowerCAmelCase = self.get_scheduler_config() _lowerCAmelCase = scheduler_class(**_lowerCAmelCase ) scheduler.set_timesteps(25 ) _lowerCAmelCase = scheduler.timesteps _lowerCAmelCase = self.dummy_model() _lowerCAmelCase = self.dummy_sample_deter _lowerCAmelCase = torch.manual_seed(0 ) for i, t in enumerate(_lowerCAmelCase ): # 1. predict noise residual _lowerCAmelCase = model(_lowerCAmelCase , _lowerCAmelCase ) if i + 1 == timesteps.shape[0]: _lowerCAmelCase = None else: _lowerCAmelCase = timesteps[i + 1] # 2. predict previous mean of sample x_t-1 _lowerCAmelCase = scheduler.step( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , prev_timestep=_lowerCAmelCase , generator=_lowerCAmelCase ).prev_sample _lowerCAmelCase = pred_prev_sample _lowerCAmelCase = torch.sum(torch.abs(_lowerCAmelCase ) ) _lowerCAmelCase = torch.mean(torch.abs(_lowerCAmelCase ) ) assert abs(result_sum.item() - 258.2044983 ) < 1E-2 assert abs(result_mean.item() - 0.3362038 ) < 1E-3 def _snake_case ( self ) -> Dict: pass def _snake_case ( self ) -> Optional[Any]: pass
489
'''simple docstring''' import jax.numpy as jnp from ...utils import logging from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel from .configuration_mta import MTaConfig _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE = "T5Config" def __a(SCREAMING_SNAKE_CASE_ : jnp.array , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int ): '''simple docstring''' _lowerCAmelCase = jnp.zeros_like(SCREAMING_SNAKE_CASE_ ) _lowerCAmelCase = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1] ) _lowerCAmelCase = shifted_input_ids.at[:, 0].set(SCREAMING_SNAKE_CASE_ ) _lowerCAmelCase = jnp.where(shifted_input_ids == -100 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) return shifted_input_ids class lowerCAmelCase_ ( __magic_name__ ): __lowerCamelCase : List[str] = "mt5" __lowerCamelCase : Any = MTaConfig class lowerCAmelCase_ ( __magic_name__ ): __lowerCamelCase : List[str] = "mt5" __lowerCamelCase : Dict = MTaConfig class lowerCAmelCase_ ( __magic_name__ ): __lowerCamelCase : Optional[Any] = "mt5" __lowerCamelCase : str = MTaConfig
489
1
"""simple docstring""" from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import ( VersatileDiffusionDualGuidedPipeline, VersatileDiffusionImageVariationPipeline, VersatileDiffusionPipeline, VersatileDiffusionTextToImagePipeline, ) else: from .modeling_text_unet import UNetFlatConditionModel from .pipeline_versatile_diffusion import VersatileDiffusionPipeline from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
238
"""simple docstring""" import random import torch from huggingface_hub import HfApi from diffusers import UNetaDModel lowerCamelCase__ : int = HfApi() lowerCamelCase__ : Tuple = {} # fmt: off lowerCamelCase__ : str = torch.tensor([ -0.7515, -1.6883, 0.2420, 0.0300, 0.6347, 1.3433, -1.1743, -3.7467, 1.2342, -2.2485, 0.4636, 0.8076, -0.7991, 0.3969, 0.8498, 0.9189, -1.8887, -3.3522, 0.7639, 0.2040, 0.6271, -2.7148, -1.6316, 3.0839, 0.3186, 0.2721, -0.9759, -1.2461, 2.6257, 1.3557 ]) lowerCamelCase__ : str = torch.tensor([ -2.3639, -2.5344, 0.0054, -0.6674, 1.5990, 1.0158, 0.3124, -2.1436, 1.8795, -2.5429, -0.1566, -0.3973, 1.2490, 2.6447, 1.2283, -0.5208, -2.8154, -3.5119, 2.3838, 1.2033, 1.7201, -2.1256, -1.4576, 2.7948, 2.4204, -0.9752, -1.2546, 0.8027, 3.2758, 3.1365 ]) lowerCamelCase__ : Dict = torch.tensor([ -0.6531, -0.6891, -0.3172, -0.5375, -0.9140, -0.5367, -0.1175, -0.7869, -0.3808, -0.4513, -0.2098, -0.0083, 0.3183, 0.5140, 0.2247, -0.1304, -0.1302, -0.2802, -0.2084, -0.2025, -0.4967, -0.4873, -0.0861, 0.6925, 0.0250, 0.1290, -0.1543, 0.6316, 1.0460, 1.4943 ]) lowerCamelCase__ : Union[str, Any] = torch.tensor([ 0.0911, 0.1107, 0.0182, 0.0435, -0.0805, -0.0608, 0.0381, 0.2172, -0.0280, 0.1327, -0.0299, -0.0255, -0.0050, -0.1170, -0.1046, 0.0309, 0.1367, 0.1728, -0.0533, -0.0748, -0.0534, 0.1624, 0.0384, -0.1805, -0.0707, 0.0642, 0.0220, -0.0134, -0.1333, -0.1505 ]) lowerCamelCase__ : str = torch.tensor([ 0.1321, 0.1337, 0.0440, 0.0622, -0.0591, -0.0370, 0.0503, 0.2133, -0.0177, 0.1415, -0.0116, -0.0112, 0.0044, -0.0980, -0.0789, 0.0395, 0.1502, 0.1785, -0.0488, -0.0514, -0.0404, 0.1539, 0.0454, -0.1559, -0.0665, 0.0659, 0.0383, -0.0005, -0.1266, -0.1386 ]) lowerCamelCase__ : Any = torch.tensor([ 0.1154, 0.1218, 0.0307, 0.0526, -0.0711, -0.0541, 0.0366, 0.2078, -0.0267, 0.1317, -0.0226, -0.0193, -0.0014, -0.1055, -0.0902, 0.0330, 0.1391, 0.1709, -0.0562, -0.0693, -0.0560, 0.1482, 0.0381, -0.1683, -0.0681, 0.0661, 0.0331, -0.0046, -0.1268, -0.1431 ]) lowerCamelCase__ : Tuple = torch.tensor([ 0.1192, 0.1240, 0.0414, 0.0606, -0.0557, -0.0412, 0.0430, 0.2042, -0.0200, 0.1385, -0.0115, -0.0132, 0.0017, -0.0965, -0.0802, 0.0398, 0.1433, 0.1747, -0.0458, -0.0533, -0.0407, 0.1545, 0.0419, -0.1574, -0.0645, 0.0626, 0.0341, -0.0010, -0.1199, -0.1390 ]) lowerCamelCase__ : Tuple = torch.tensor([ 0.1075, 0.1074, 0.0205, 0.0431, -0.0774, -0.0607, 0.0298, 0.2042, -0.0320, 0.1267, -0.0281, -0.0250, -0.0064, -0.1091, -0.0946, 0.0290, 0.1328, 0.1650, -0.0580, -0.0738, -0.0586, 0.1440, 0.0337, -0.1746, -0.0712, 0.0605, 0.0250, -0.0099, -0.1316, -0.1473 ]) lowerCamelCase__ : List[Any] = torch.tensor([ -1.4572, -2.0481, -0.0414, -0.6005, 1.4136, 0.5848, 0.4028, -2.7330, 1.2212, -2.1228, 0.2155, 0.4039, 0.7662, 2.0535, 0.7477, -0.3243, -2.1758, -2.7648, 1.6947, 0.7026, 1.2338, -1.6078, -0.8682, 2.2810, 1.8574, -0.5718, -0.5586, -0.0186, 2.3415, 2.1251]) lowerCamelCase__ : Union[str, Any] = torch.tensor([ -1.3690, -1.9720, -0.4090, -0.6966, 1.4660, 0.9938, -0.1385, -2.7324, 0.7736, -1.8917, 0.2923, 0.4293, 0.1693, 1.4112, 1.1887, -0.3181, -2.2160, -2.6381, 1.3170, 0.8163, 0.9240, -1.6544, -0.6099, 2.5259, 1.6430, -0.9090, -0.9392, -0.0126, 2.4268, 2.3266 ]) lowerCamelCase__ : Optional[int] = torch.tensor([ -1.3525, -1.9628, -0.3956, -0.6860, 1.4664, 1.0014, -0.1259, -2.7212, 0.7772, -1.8811, 0.2996, 0.4388, 0.1704, 1.4029, 1.1701, -0.3027, -2.2053, -2.6287, 1.3350, 0.8131, 0.9274, -1.6292, -0.6098, 2.5131, 1.6505, -0.8958, -0.9298, -0.0151, 2.4257, 2.3355 ]) lowerCamelCase__ : int = torch.tensor([ -2.0585, -2.7897, -0.2850, -0.8940, 1.9052, 0.5702, 0.6345, -3.8959, 1.5932, -3.2319, 0.1974, 0.0287, 1.7566, 2.6543, 0.8387, -0.5351, -3.2736, -4.3375, 2.9029, 1.6390, 1.4640, -2.1701, -1.9013, 2.9341, 3.4981, -0.6255, -1.1644, -0.1591, 3.7097, 3.2066 ]) lowerCamelCase__ : Any = torch.tensor([ -2.3139, -2.5594, -0.0197, -0.6785, 1.7001, 1.1606, 0.3075, -2.1740, 1.8071, -2.5630, -0.0926, -0.3811, 1.2116, 2.6246, 1.2731, -0.5398, -2.8153, -3.6140, 2.3893, 1.3262, 1.6258, -2.1856, -1.3267, 2.8395, 2.3779, -1.0623, -1.2468, 0.8959, 3.3367, 3.2243 ]) lowerCamelCase__ : Any = torch.tensor([ -2.0628, -2.7667, -0.2089, -0.8263, 2.0539, 0.5992, 0.6495, -3.8336, 1.6025, -3.2817, 0.1721, -0.0633, 1.7516, 2.7039, 0.8100, -0.5908, -3.2113, -4.4343, 2.9257, 1.3632, 1.5562, -2.1489, -1.9894, 3.0560, 3.3396, -0.7328, -1.0417, 0.0383, 3.7093, 3.2343 ]) lowerCamelCase__ : Optional[int] = torch.tensor([ -1.4574, -2.0569, -0.0473, -0.6117, 1.4018, 0.5769, 0.4129, -2.7344, 1.2241, -2.1397, 0.2000, 0.3937, 0.7616, 2.0453, 0.7324, -0.3391, -2.1746, -2.7744, 1.6963, 0.6921, 1.2187, -1.6172, -0.8877, 2.2439, 1.8471, -0.5839, -0.5605, -0.0464, 2.3250, 2.1219 ]) # fmt: on lowerCamelCase__ : Optional[int] = api.list_models(filter='''diffusers''') for mod in models: if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256": lowerCamelCase__ : int = '''/home/patrick/google_checkpoints/''' + mod.modelId.split('''/''')[-1] print(F'''Started running {mod.modelId}!!!''') if mod.modelId.startswith('''CompVis'''): lowerCamelCase__ : str = UNetaDModel.from_pretrained(local_checkpoint, subfolder='''unet''') else: lowerCamelCase__ : Union[str, Any] = UNetaDModel.from_pretrained(local_checkpoint) torch.manual_seed(0) random.seed(0) lowerCamelCase__ : Dict = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size) lowerCamelCase__ : Union[str, Any] = torch.tensor([10] * noise.shape[0]) with torch.no_grad(): lowerCamelCase__ : int = model(noise, time_step).sample assert torch.allclose( logits[0, 0, 0, :30], results['''_'''.join('''_'''.join(mod.modelId.split('''/''')).split('''-'''))], atol=1E-3 ) print(F'''{mod.modelId} has passed successfully!!!''')
238
1
"""simple docstring""" import os import unittest from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer from transformers.testing_utils import require_jieba, tooslow from ...test_tokenization_common import TokenizerTesterMixin @require_jieba class _lowercase ( lowerCAmelCase , unittest.TestCase ): _a : Dict = CpmAntTokenizer _a : Dict = False def _UpperCamelCase ( self : Optional[Any] ): """simple docstring""" super().setUp() __snake_case : int =[ '''<d>''', '''</d>''', '''<s>''', '''</s>''', '''</_>''', '''<unk>''', '''<pad>''', '''</n>''', '''我''', '''是''', '''C''', '''P''', '''M''', '''A''', '''n''', '''t''', ] __snake_case : List[Any] =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) @tooslow def _UpperCamelCase ( self : Optional[int] ): """simple docstring""" __snake_case : int =CpmAntTokenizer.from_pretrained('''openbmb/cpm-ant-10b''' ) __snake_case : Any ='''今天天气真好!''' __snake_case : Any =['''今天''', '''天气''', '''真''', '''好''', '''!'''] __snake_case : Optional[Any] =tokenizer.tokenize(a ) self.assertListEqual(a , a ) __snake_case : List[Any] ='''今天天气真好!''' __snake_case : int =[tokenizer.bos_token] + tokens __snake_case : Any =[6, 9_8_0_2, 1_4_9_6_2, 2_0_8_2, 8_3_1, 2_4_4] self.assertListEqual(tokenizer.convert_tokens_to_ids(a ) , a ) __snake_case : Tuple =tokenizer.decode(a ) self.assertEqual(a , a )
497
"""simple docstring""" import unittest from transformers import MraConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_torch_available(): import torch from transformers import ( MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, MraModel, ) from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST class _lowercase : def __init__( self : str , a : List[str] , a : Any=2 , a : int=8 , a : List[str]=True , a : Union[str, Any]=True , a : Union[str, Any]=True , a : Optional[int]=True , a : Union[str, Any]=9_9 , a : Dict=1_6 , a : Union[str, Any]=5 , a : int=2 , a : List[str]=3_6 , a : int="gelu" , a : Optional[int]=0.0 , a : Any=0.0 , a : Optional[Any]=5_1_2 , a : Tuple=1_6 , a : Dict=2 , a : Union[str, Any]=0.0_2 , a : Dict=3 , a : Union[str, Any]=4 , a : Optional[int]=None , ): """simple docstring""" __snake_case : Any =parent __snake_case : int =batch_size __snake_case : Dict =seq_length __snake_case : Any =is_training __snake_case : Optional[int] =use_input_mask __snake_case : List[Any] =use_token_type_ids __snake_case : List[str] =use_labels __snake_case : Optional[Any] =vocab_size __snake_case : Optional[Any] =hidden_size __snake_case : Optional[Any] =num_hidden_layers __snake_case : Any =num_attention_heads __snake_case : Optional[int] =intermediate_size __snake_case : Dict =hidden_act __snake_case : List[str] =hidden_dropout_prob __snake_case : List[Any] =attention_probs_dropout_prob __snake_case : List[str] =max_position_embeddings __snake_case : Tuple =type_vocab_size __snake_case : Tuple =type_sequence_label_size __snake_case : int =initializer_range __snake_case : Any =num_labels __snake_case : List[str] =num_choices __snake_case : Union[str, Any] =scope def _UpperCamelCase ( self : Dict ): """simple docstring""" __snake_case : Union[str, Any] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __snake_case : int =None if self.use_input_mask: __snake_case : Optional[int] =random_attention_mask([self.batch_size, self.seq_length] ) __snake_case : Optional[Any] =None if self.use_token_type_ids: __snake_case : Optional[int] =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __snake_case : str =None __snake_case : int =None __snake_case : int =None if self.use_labels: __snake_case : int =ids_tensor([self.batch_size] , self.type_sequence_label_size ) __snake_case : str =ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __snake_case : Optional[Any] =ids_tensor([self.batch_size] , self.num_choices ) __snake_case : List[str] =self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def _UpperCamelCase ( self : List[Any] ): """simple docstring""" return MraConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=a , initializer_range=self.initializer_range , ) def _UpperCamelCase ( self : List[str] ): """simple docstring""" __snake_case : int =self.get_config() __snake_case : Optional[Any] =3_0_0 return config def _UpperCamelCase ( self : Any ): """simple docstring""" ( ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ) : Tuple =self.prepare_config_and_inputs() __snake_case : Dict =True __snake_case : Any =floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) __snake_case : Dict =ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def _UpperCamelCase ( self : List[str] , a : List[Any] , a : int , a : Optional[int] , a : List[Any] , a : Tuple , a : Tuple , a : List[Any] ): """simple docstring""" __snake_case : List[Any] =MraModel(config=a ) model.to(a ) model.eval() __snake_case : Union[str, Any] =model(a , attention_mask=a , token_type_ids=a ) __snake_case : Any =model(a , token_type_ids=a ) __snake_case : int =model(a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _UpperCamelCase ( self : str , a : Tuple , a : Any , a : int , a : Optional[int] , a : Union[str, Any] , a : int , a : Tuple , a : Optional[Any] , a : Optional[int] , ): """simple docstring""" __snake_case : Any =True __snake_case : Union[str, Any] =MraModel(a ) model.to(a ) model.eval() __snake_case : Union[str, Any] =model( a , attention_mask=a , token_type_ids=a , encoder_hidden_states=a , encoder_attention_mask=a , ) __snake_case : List[Any] =model( a , attention_mask=a , token_type_ids=a , encoder_hidden_states=a , ) __snake_case : List[str] =model(a , attention_mask=a , token_type_ids=a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _UpperCamelCase ( self : Tuple , a : Any , a : List[Any] , a : int , a : str , a : Optional[Any] , a : List[str] , a : List[str] ): """simple docstring""" __snake_case : Tuple =MraForMaskedLM(config=a ) model.to(a ) model.eval() __snake_case : str =model(a , attention_mask=a , token_type_ids=a , labels=a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _UpperCamelCase ( self : List[str] , a : Optional[int] , a : int , a : List[str] , a : int , a : Any , a : Union[str, Any] , a : Union[str, Any] ): """simple docstring""" __snake_case : Optional[Any] =MraForQuestionAnswering(config=a ) model.to(a ) model.eval() __snake_case : Dict =model( a , attention_mask=a , token_type_ids=a , start_positions=a , end_positions=a , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _UpperCamelCase ( self : Dict , a : Dict , a : Dict , a : Optional[Any] , a : int , a : int , a : List[str] , a : Any ): """simple docstring""" __snake_case : Optional[Any] =self.num_labels __snake_case : Optional[Any] =MraForSequenceClassification(a ) model.to(a ) model.eval() __snake_case : str =model(a , attention_mask=a , token_type_ids=a , labels=a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _UpperCamelCase ( self : Optional[int] , a : Tuple , a : Any , a : Optional[int] , a : Any , a : int , a : str , a : Optional[int] ): """simple docstring""" __snake_case : Tuple =self.num_labels __snake_case : Optional[int] =MraForTokenClassification(config=a ) model.to(a ) model.eval() __snake_case : List[str] =model(a , attention_mask=a , token_type_ids=a , labels=a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _UpperCamelCase ( self : str , a : Tuple , a : Tuple , a : Optional[Any] , a : Dict , a : Dict , a : Dict , a : str ): """simple docstring""" __snake_case : Union[str, Any] =self.num_choices __snake_case : Optional[int] =MraForMultipleChoice(config=a ) model.to(a ) model.eval() __snake_case : List[str] =input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __snake_case : Tuple =token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __snake_case : Any =input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __snake_case : int =model( a , attention_mask=a , token_type_ids=a , labels=a , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def _UpperCamelCase ( self : str ): """simple docstring""" __snake_case : Union[str, Any] =self.prepare_config_and_inputs() ( ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ) : Optional[int] =config_and_inputs __snake_case : Tuple ={'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class _lowercase ( lowerCAmelCase , unittest.TestCase ): _a : Union[str, Any] = ( ( MraModel, MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, ) if is_torch_available() else () ) _a : int = False _a : Union[str, Any] = False _a : Dict = False _a : Dict = False _a : Any = () def _UpperCamelCase ( self : Optional[int] ): """simple docstring""" __snake_case : Optional[Any] =MraModelTester(self ) __snake_case : List[Any] =ConfigTester(self , config_class=a , hidden_size=3_7 ) def _UpperCamelCase ( self : List[Any] ): """simple docstring""" self.config_tester.run_common_tests() def _UpperCamelCase ( self : str ): """simple docstring""" __snake_case : Dict =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a ) def _UpperCamelCase ( self : Any ): """simple docstring""" __snake_case : List[Any] =self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __snake_case : Any =type self.model_tester.create_and_check_model(*a ) def _UpperCamelCase ( self : Optional[Any] ): """simple docstring""" __snake_case : Optional[Any] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*a ) def _UpperCamelCase ( self : Dict ): """simple docstring""" __snake_case : Union[str, Any] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*a ) def _UpperCamelCase ( self : Dict ): """simple docstring""" __snake_case : List[Any] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*a ) def _UpperCamelCase ( self : int ): """simple docstring""" __snake_case : str =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*a ) def _UpperCamelCase ( self : str ): """simple docstring""" __snake_case : Union[str, Any] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*a ) @slow def _UpperCamelCase ( self : Tuple ): """simple docstring""" for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __snake_case : Tuple =MraModel.from_pretrained(a ) self.assertIsNotNone(a ) @unittest.skip(reason='''MRA does not output attentions''' ) def _UpperCamelCase ( self : Optional[Any] ): """simple docstring""" return @require_torch class _lowercase ( unittest.TestCase ): @slow def _UpperCamelCase ( self : Dict ): """simple docstring""" __snake_case : str =MraModel.from_pretrained('''uw-madison/mra-base-512-4''' ) __snake_case : Optional[int] =torch.arange(2_5_6 ).unsqueeze(0 ) with torch.no_grad(): __snake_case : List[str] =model(a )[0] __snake_case : Any =torch.Size((1, 2_5_6, 7_6_8) ) self.assertEqual(output.shape , a ) __snake_case : int =torch.tensor( [[[-0.0_1_4_0, 0.0_8_3_0, -0.0_3_8_1], [0.1_5_4_6, 0.1_4_0_2, 0.0_2_2_0], [0.1_1_6_2, 0.0_8_5_1, 0.0_1_6_5]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , a , atol=1e-4 ) ) @slow def _UpperCamelCase ( self : List[str] ): """simple docstring""" __snake_case : Any =MraForMaskedLM.from_pretrained('''uw-madison/mra-base-512-4''' ) __snake_case : Union[str, Any] =torch.arange(2_5_6 ).unsqueeze(0 ) with torch.no_grad(): __snake_case : Optional[int] =model(a )[0] __snake_case : Union[str, Any] =5_0_2_6_5 __snake_case : List[str] =torch.Size((1, 2_5_6, vocab_size) ) self.assertEqual(output.shape , a ) __snake_case : str =torch.tensor( [[[9.2_5_9_5, -3.6_0_3_8, 1_1.8_8_1_9], [9.3_8_6_9, -3.2_6_9_3, 1_1.0_9_5_6], [1_1.8_5_2_4, -3.4_9_3_8, 1_3.1_2_1_0]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , a , atol=1e-4 ) ) @slow def _UpperCamelCase ( self : Tuple ): """simple docstring""" __snake_case : List[Any] =MraForMaskedLM.from_pretrained('''uw-madison/mra-base-4096-8-d3''' ) __snake_case : Optional[int] =torch.arange(4_0_9_6 ).unsqueeze(0 ) with torch.no_grad(): __snake_case : Tuple =model(a )[0] __snake_case : Optional[int] =5_0_2_6_5 __snake_case : Tuple =torch.Size((1, 4_0_9_6, vocab_size) ) self.assertEqual(output.shape , a ) __snake_case : List[str] =torch.tensor( [[[5.4_7_8_9, -2.3_5_6_4, 7.5_0_6_4], [7.9_0_6_7, -1.3_3_6_9, 9.9_6_6_8], [9.0_7_1_2, -1.8_1_0_6, 7.0_3_8_0]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , a , atol=1e-4 ) )
497
1
from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class snake_case ( UpperCamelCase_ ): lowercase_ = ['image_processor', 'tokenizer'] lowercase_ = 'BridgeTowerImageProcessor' lowercase_ = ('RobertaTokenizer', 'RobertaTokenizerFast') def __init__( self : Dict , a_ : str , a_ : Tuple )-> str: """simple docstring""" super().__init__(a_ , a_ ) def __call__( self : List[Any] , a_ : str , a_ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , a_ : bool = True , a_ : Union[bool, str, PaddingStrategy] = False , a_ : Union[bool, str, TruncationStrategy] = None , a_ : Optional[int] = None , a_ : int = 0 , a_ : Optional[int] = None , a_ : Optional[bool] = None , a_ : Optional[bool] = None , a_ : bool = False , a_ : bool = False , a_ : bool = False , a_ : bool = False , a_ : bool = True , a_ : Optional[Union[str, TensorType]] = None , **a_ : List[Any] , )-> BatchEncoding: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = self.tokenizer( text=a_ , add_special_tokens=a_ , padding=a_ , truncation=a_ , max_length=a_ , stride=a_ , pad_to_multiple_of=a_ , return_token_type_ids=a_ , return_attention_mask=a_ , return_overflowing_tokens=a_ , return_special_tokens_mask=a_ , return_offsets_mapping=a_ , return_length=a_ , verbose=a_ , return_tensors=a_ , **a_ , ) # add pixel_values + pixel_mask SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.image_processor( a_ , return_tensors=a_ , do_normalize=a_ , do_center_crop=a_ , **a_ ) encoding.update(a_ ) return encoding def __lowercase( self : Dict , *a_ : Any , **a_ : int )-> List[Any]: """simple docstring""" return self.tokenizer.batch_decode(*a_ , **a_ ) def __lowercase( self : str , *a_ : int , **a_ : str )-> Any: """simple docstring""" return self.tokenizer.decode(*a_ , **a_ ) @property def __lowercase( self : Optional[int] )-> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ : Tuple = self.tokenizer.model_input_names SCREAMING_SNAKE_CASE__ : str = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
85
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging UpperCamelCase_ = logging.get_logger(__name__) UpperCamelCase_ = { "facebook/data2vec-text-base": "https://huggingface.co/data2vec/resolve/main/config.json", } class _SCREAMING_SNAKE_CASE ( snake_case ): lowerCamelCase_ = 'data2vec-text' def __init__( self : str , snake_case_ : Dict=3_0522 , snake_case_ : Union[str, Any]=768 , snake_case_ : List[str]=12 , snake_case_ : int=12 , snake_case_ : Any=3072 , snake_case_ : Optional[int]="gelu" , snake_case_ : str=0.1 , snake_case_ : Optional[int]=0.1 , snake_case_ : Optional[int]=512 , snake_case_ : Tuple=2 , snake_case_ : Optional[int]=0.02 , snake_case_ : str=1E-12 , snake_case_ : Optional[Any]=1 , snake_case_ : Any=0 , snake_case_ : str=2 , snake_case_ : int="absolute" , snake_case_ : Union[str, Any]=True , snake_case_ : List[str]=None , **snake_case_ : Any , ): """simple docstring""" super().__init__(pad_token_id=snake_case_ , bos_token_id=snake_case_ , eos_token_id=snake_case_ , **snake_case_ ) A : Any = vocab_size A : Optional[Any] = hidden_size A : List[Any] = num_hidden_layers A : Optional[int] = num_attention_heads A : Optional[int] = hidden_act A : List[str] = intermediate_size A : Any = hidden_dropout_prob A : str = attention_probs_dropout_prob A : Optional[Any] = max_position_embeddings A : Any = type_vocab_size A : Optional[int] = initializer_range A : Optional[int] = layer_norm_eps A : str = position_embedding_type A : List[str] = use_cache A : List[str] = classifier_dropout class _SCREAMING_SNAKE_CASE ( snake_case ): @property def _UpperCAmelCase ( self : Tuple ): """simple docstring""" if self.task == "multiple-choice": A : Optional[Any] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: A : Optional[int] = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ] )
256
0
"""simple docstring""" def A__ ( UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' _SCREAMING_SNAKE_CASE = [[] for _ in range(UpperCamelCase__ )] _SCREAMING_SNAKE_CASE = key - 1 if key <= 0: raise ValueError('''Height of grid can\'t be 0 or negative''' ) if key == 1 or len(UpperCamelCase__ ) <= key: return input_string for position, character in enumerate(UpperCamelCase__ ): _SCREAMING_SNAKE_CASE = position % (lowest * 2) # puts it in bounds _SCREAMING_SNAKE_CASE = min(UpperCamelCase__ , lowest * 2 - num ) # creates zigzag pattern temp_grid[num].append(UpperCamelCase__ ) _SCREAMING_SNAKE_CASE = [''''''.join(UpperCamelCase__ ) for row in temp_grid] _SCREAMING_SNAKE_CASE = ''''''.join(UpperCamelCase__ ) return output_string def A__ ( UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' _SCREAMING_SNAKE_CASE = [] _SCREAMING_SNAKE_CASE = key - 1 if key <= 0: raise ValueError('''Height of grid can\'t be 0 or negative''' ) if key == 1: return input_string _SCREAMING_SNAKE_CASE = [[] for _ in range(UpperCamelCase__ )] # generates template for position in range(len(UpperCamelCase__ ) ): _SCREAMING_SNAKE_CASE = position % (lowest * 2) # puts it in bounds _SCREAMING_SNAKE_CASE = min(UpperCamelCase__ , lowest * 2 - num ) # creates zigzag pattern temp_grid[num].append('''*''' ) _SCREAMING_SNAKE_CASE = 0 for row in temp_grid: # fills in the characters _SCREAMING_SNAKE_CASE = input_string[counter : counter + len(UpperCamelCase__ )] grid.append(list(UpperCamelCase__ ) ) counter += len(UpperCamelCase__ ) _SCREAMING_SNAKE_CASE = '''''' # reads as zigzag for position in range(len(UpperCamelCase__ ) ): _SCREAMING_SNAKE_CASE = position % (lowest * 2) # puts it in bounds _SCREAMING_SNAKE_CASE = min(UpperCamelCase__ , lowest * 2 - num ) # creates zigzag pattern output_string += grid[num][0] grid[num].pop(0 ) return output_string def A__ ( UpperCamelCase__ ): '''simple docstring''' _SCREAMING_SNAKE_CASE = {} for key_guess in range(1 , len(UpperCamelCase__ ) ): # tries every key _SCREAMING_SNAKE_CASE = decrypt(UpperCamelCase__ , UpperCamelCase__ ) return results if __name__ == "__main__": import doctest doctest.testmod()
168
"""simple docstring""" from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool from google.protobuf import symbol_database as _symbol_database from google.protobuf.internal import builder as _builder # @@protoc_insertion_point(imports) lowerCamelCase : str = _symbol_database.Default() lowerCamelCase : Any = _descriptor_pool.Default().AddSerializedFile( b"""\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. \x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03""" ) lowerCamelCase : List[str] = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, """sentencepiece_model_pb2""", _globals) if _descriptor._USE_C_DESCRIPTORS is False: lowerCamelCase : List[str] = None lowerCamelCase : List[str] = b"""H\003""" # (generated by protobuf compiler, but `_TRAINERSPEC` is not defined) # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001" # _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None # _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001" lowerCamelCase : Optional[int] = 4_5 lowerCamelCase : Tuple = 1_5_8_1 lowerCamelCase : Optional[int] = 1_5_1_7 lowerCamelCase : List[Any] = 1_5_7_0 lowerCamelCase : Dict = 1_5_8_4 lowerCamelCase : Dict = 1_7_9_3 lowerCamelCase : Optional[Any] = 1_7_9_5 lowerCamelCase : List[Any] = 1_9_1_6 lowerCamelCase : int = 1_8_6_4 lowerCamelCase : int = 1_9_0_5 lowerCamelCase : Dict = 1_9_1_9 lowerCamelCase : str = 2_4_2_9 lowerCamelCase : str = 2_2_0_8 lowerCamelCase : int = 2_4_1_8 lowerCamelCase : Dict = 2_3_2_3 lowerCamelCase : int = 2_4_0_7 # @@protoc_insertion_point(module_scope)
168
1
"""simple docstring""" def lowercase (snake_case__ : list[int] , snake_case__ : int ) -> bool: '''simple docstring''' lowerCAmelCase = len(snake_case__ ) lowerCAmelCase = [[False] * (required_sum + 1) for _ in range(arr_len + 1 )] # for each arr value, a sum of zero(0) can be formed by not taking any element # hence True/1 for i in range(arr_len + 1 ): lowerCAmelCase = True # sum is not zero and set is empty then false for i in range(1 , required_sum + 1 ): lowerCAmelCase = False for i in range(1 , arr_len + 1 ): for j in range(1 , required_sum + 1 ): if arr[i - 1] > j: lowerCAmelCase = subset[i - 1][j] if arr[i - 1] <= j: lowerCAmelCase = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]] return subset[arr_len][required_sum] if __name__ == "__main__": import doctest doctest.testmod()
169
"""simple docstring""" from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline else: from .camera import create_pan_cameras from .pipeline_shap_e import ShapEPipeline from .pipeline_shap_e_img2img import ShapEImgaImgPipeline from .renderer import ( BoundingBoxVolume, ImportanceRaySampler, MLPNeRFModelOutput, MLPNeRSTFModel, ShapEParamsProjModel, ShapERenderer, StratifiedRaySampler, VoidNeRFModel, )
169
1
import unittest import numpy as np from transformers.testing_utils import require_pytesseract, require_torch from transformers.utils import is_pytesseract_available, is_torch_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_pytesseract_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class _UpperCamelCase ( unittest.TestCase ): '''simple docstring''' def __init__( self : Optional[int] , __lowercase : Optional[int] , __lowercase : Optional[Any]=7 , __lowercase : Any=3 , __lowercase : Dict=18 , __lowercase : List[Any]=30 , __lowercase : Union[str, Any]=4_00 , __lowercase : Tuple=True , __lowercase : Dict=None , __lowercase : Optional[int]=True , ): '''simple docstring''' UpperCAmelCase_ = size if size is not None else {"""height""": 18, """width""": 18} UpperCAmelCase_ = parent UpperCAmelCase_ = batch_size UpperCAmelCase_ = num_channels UpperCAmelCase_ = image_size UpperCAmelCase_ = min_resolution UpperCAmelCase_ = max_resolution UpperCAmelCase_ = do_resize UpperCAmelCase_ = size UpperCAmelCase_ = apply_ocr def SCREAMING_SNAKE_CASE ( self : List[Any] ): '''simple docstring''' return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr} @require_torch @require_pytesseract class _UpperCamelCase ( A_ , unittest.TestCase ): '''simple docstring''' lowerCamelCase : Optional[int] = LayoutLMvaImageProcessor if is_pytesseract_available() else None def SCREAMING_SNAKE_CASE ( self : Any ): '''simple docstring''' UpperCAmelCase_ = LayoutLMvaImageProcessingTester(self ) @property def SCREAMING_SNAKE_CASE ( self : Optional[int] ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__A , """do_resize""" ) ) self.assertTrue(hasattr(__A , """size""" ) ) self.assertTrue(hasattr(__A , """apply_ocr""" ) ) def SCREAMING_SNAKE_CASE ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase_ = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18} ) UpperCAmelCase_ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} ) def SCREAMING_SNAKE_CASE ( self : List[str] ): '''simple docstring''' pass def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A ) for image in image_inputs: self.assertIsInstance(__A , Image.Image ) # Test not batched input UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors="""pt""" ) self.assertEqual( encoding.pixel_values.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) self.assertIsInstance(encoding.words , __A ) self.assertIsInstance(encoding.boxes , __A ) # Test batched UpperCAmelCase_ = image_processing(__A , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) def SCREAMING_SNAKE_CASE ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , numpify=__A ) for image in image_inputs: self.assertIsInstance(__A , np.ndarray ) # Test not batched input UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) # Test batched UpperCAmelCase_ = image_processing(__A , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , torchify=__A ) for image in image_inputs: self.assertIsInstance(__A , torch.Tensor ) # Test not batched input UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) # Test batched UpperCAmelCase_ = image_processing(__A , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) def SCREAMING_SNAKE_CASE ( self : str ): '''simple docstring''' UpperCAmelCase_ = LayoutLMvaImageProcessor() from datasets import load_dataset UpperCAmelCase_ = load_dataset("""hf-internal-testing/fixtures_docvqa""" , split="""test""" ) UpperCAmelCase_ = Image.open(ds[0]["""file"""] ).convert("""RGB""" ) UpperCAmelCase_ = image_processing(__A , return_tensors="""pt""" ) self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) ) self.assertEqual(len(encoding.words ) , len(encoding.boxes ) ) # fmt: off # the words and boxes were obtained with Tesseract 4.1.1 UpperCAmelCase_ = [["""11:14""", """to""", """11:39""", """a.m""", """11:39""", """to""", """11:44""", """a.m.""", """11:44""", """a.m.""", """to""", """12:25""", """p.m.""", """12:25""", """to""", """12:58""", """p.m.""", """12:58""", """to""", """4:00""", """p.m.""", """2:00""", """to""", """5:00""", """p.m.""", """Coffee""", """Break""", """Coffee""", """will""", """be""", """served""", """for""", """men""", """and""", """women""", """in""", """the""", """lobby""", """adjacent""", """to""", """exhibit""", """area.""", """Please""", """move""", """into""", """exhibit""", """area.""", """(Exhibits""", """Open)""", """TRRF""", """GENERAL""", """SESSION""", """(PART""", """|)""", """Presiding:""", """Lee""", """A.""", """Waller""", """TRRF""", """Vice""", """President""", """“Introductory""", """Remarks”""", """Lee""", """A.""", """Waller,""", """TRRF""", """Vice""", """Presi-""", """dent""", """Individual""", """Interviews""", """with""", """TRRF""", """Public""", """Board""", """Members""", """and""", """Sci-""", """entific""", """Advisory""", """Council""", """Mem-""", """bers""", """Conducted""", """by""", """TRRF""", """Treasurer""", """Philip""", """G.""", """Kuehn""", """to""", """get""", """answers""", """which""", """the""", """public""", """refrigerated""", """warehousing""", """industry""", """is""", """looking""", """for.""", """Plus""", """questions""", """from""", """the""", """floor.""", """Dr.""", """Emil""", """M.""", """Mrak,""", """University""", """of""", """Cal-""", """ifornia,""", """Chairman,""", """TRRF""", """Board;""", """Sam""", """R.""", """Cecil,""", """University""", """of""", """Georgia""", """College""", """of""", """Agriculture;""", """Dr.""", """Stanley""", """Charm,""", """Tufts""", """University""", """School""", """of""", """Medicine;""", """Dr.""", """Robert""", """H.""", """Cotton,""", """ITT""", """Continental""", """Baking""", """Company;""", """Dr.""", """Owen""", """Fennema,""", """University""", """of""", """Wis-""", """consin;""", """Dr.""", """Robert""", """E.""", """Hardenburg,""", """USDA.""", """Questions""", """and""", """Answers""", """Exhibits""", """Open""", """Capt.""", """Jack""", """Stoney""", """Room""", """TRRF""", """Scientific""", """Advisory""", """Council""", """Meeting""", """Ballroom""", """Foyer"""]] # noqa: E231 UpperCAmelCase_ = [[[1_41, 57, 2_14, 69], [2_28, 58, 2_52, 69], [1_41, 75, 2_16, 88], [2_30, 79, 2_80, 88], [1_42, 2_60, 2_18, 2_73], [2_30, 2_61, 2_55, 2_73], [1_43, 2_79, 2_18, 2_90], [2_31, 2_82, 2_90, 2_91], [1_43, 3_42, 2_18, 3_54], [2_31, 3_45, 2_89, 3_55], [2_02, 3_62, 2_27, 3_73], [1_43, 3_79, 2_20, 3_92], [2_31, 3_82, 2_91, 3_94], [1_44, 7_14, 2_20, 7_26], [2_31, 7_15, 2_56, 7_26], [1_44, 7_32, 2_20, 7_45], [2_32, 7_36, 2_91, 7_47], [1_44, 7_69, 2_18, 7_82], [2_31, 7_70, 2_56, 7_82], [1_41, 7_88, 2_02, 8_01], [2_15, 7_91, 2_74, 8_04], [1_43, 8_26, 2_04, 8_38], [2_15, 8_26, 2_40, 8_38], [1_42, 8_44, 2_02, 8_57], [2_15, 8_47, 2_74, 8_59], [3_34, 57, 4_27, 69], [4_40, 57, 5_22, 69], [3_69, 75, 4_61, 88], [4_69, 75, 5_16, 88], [5_28, 76, 5_62, 88], [5_70, 76, 6_67, 88], [6_75, 75, 7_11, 87], [7_21, 79, 7_78, 88], [7_89, 75, 8_40, 88], [3_69, 97, 4_70, 1_07], [4_84, 94, 5_07, 1_06], [5_18, 94, 5_62, 1_07], [5_76, 94, 6_55, 1_10], [6_68, 94, 7_92, 1_09], [8_04, 95, 8_29, 1_07], [3_69, 1_13, 4_65, 1_25], [4_77, 1_16, 5_47, 1_25], [5_62, 1_13, 6_58, 1_25], [6_71, 1_16, 7_48, 1_25], [7_61, 1_13, 8_11, 1_25], [3_69, 1_31, 4_65, 1_43], [4_77, 1_33, 5_48, 1_43], [5_63, 1_30, 6_98, 1_45], [7_10, 1_30, 8_02, 1_46], [3_36, 1_71, 4_12, 1_83], [4_23, 1_71, 5_72, 1_83], [5_82, 1_70, 7_16, 1_84], [7_28, 1_71, 8_17, 1_87], [8_29, 1_71, 8_44, 1_86], [3_38, 1_97, 4_82, 2_12], [5_07, 1_96, 5_57, 2_09], [5_69, 1_96, 5_95, 2_08], [6_10, 1_96, 7_02, 2_09], [5_05, 2_14, 5_83, 2_26], [5_95, 2_14, 6_56, 2_27], [6_70, 2_15, 8_07, 2_27], [3_35, 2_59, 5_43, 2_74], [5_56, 2_59, 7_08, 2_72], [3_72, 2_79, 4_22, 2_91], [4_35, 2_79, 4_60, 2_91], [4_74, 2_79, 5_74, 2_92], [5_87, 2_78, 6_64, 2_91], [6_76, 2_78, 7_38, 2_91], [7_51, 2_79, 8_34, 2_91], [3_72, 2_98, 4_34, 3_10], [3_35, 3_41, 4_83, 3_54], [4_97, 3_41, 6_55, 3_54], [6_67, 3_41, 7_28, 3_54], [7_40, 3_41, 8_25, 3_54], [3_35, 3_60, 4_30, 3_72], [4_42, 3_60, 5_34, 3_72], [5_45, 3_59, 6_87, 3_72], [6_97, 3_60, 7_54, 3_72], [7_65, 3_60, 8_23, 3_73], [3_34, 3_78, 4_28, 3_91], [4_40, 3_78, 5_77, 3_94], [5_90, 3_78, 7_05, 3_91], [7_20, 3_78, 8_01, 3_91], [3_34, 3_97, 4_00, 4_09], [3_70, 4_16, 5_29, 4_29], [5_44, 4_16, 5_76, 4_32], [5_87, 4_16, 6_65, 4_28], [6_77, 4_16, 8_14, 4_29], [3_72, 4_35, 4_52, 4_50], [4_65, 4_34, 4_95, 4_47], [5_11, 4_34, 6_00, 4_47], [6_11, 4_36, 6_37, 4_47], [6_49, 4_36, 6_94, 4_51], [7_05, 4_38, 8_24, 4_47], [3_69, 4_53, 4_52, 4_66], [4_64, 4_54, 5_09, 4_66], [5_22, 4_53, 6_11, 4_69], [6_25, 4_53, 7_92, 4_69], [3_70, 4_72, 5_56, 4_88], [5_70, 4_72, 6_84, 4_87], [6_97, 4_72, 7_18, 4_85], [7_32, 4_72, 8_35, 4_88], [3_69, 4_90, 4_11, 5_03], [4_25, 4_90, 4_84, 5_03], [4_96, 4_90, 6_35, 5_06], [6_45, 4_90, 7_07, 5_03], [7_18, 4_91, 7_61, 5_03], [7_71, 4_90, 8_40, 5_03], [3_36, 5_10, 3_74, 5_21], [3_88, 5_10, 4_47, 5_22], [4_60, 5_10, 4_89, 5_21], [5_03, 5_10, 5_80, 5_22], [5_92, 5_09, 7_36, 5_25], [7_45, 5_09, 7_70, 5_22], [7_81, 5_09, 8_40, 5_22], [3_38, 5_28, 4_34, 5_41], [4_48, 5_28, 5_96, 5_41], [6_09, 5_27, 6_87, 5_40], [7_00, 5_28, 7_92, 5_41], [3_36, 5_46, 3_97, 5_59], [4_07, 5_46, 4_31, 5_59], [4_43, 5_46, 5_25, 5_60], [5_37, 5_46, 6_80, 5_62], [6_88, 5_46, 7_14, 5_59], [7_22, 5_46, 8_37, 5_62], [3_36, 5_65, 4_49, 5_81], [4_61, 5_65, 4_85, 5_77], [4_97, 5_65, 6_65, 5_81], [6_81, 5_65, 7_18, 5_77], [7_32, 5_65, 8_37, 5_80], [3_37, 5_84, 4_38, 5_97], [4_52, 5_83, 5_21, 5_96], [5_35, 5_84, 6_77, 5_99], [6_90, 5_83, 7_87, 5_96], [8_01, 5_83, 8_25, 5_96], [3_38, 6_02, 4_78, 6_15], [4_92, 6_02, 5_30, 6_14], [5_43, 6_02, 6_38, 6_15], [6_50, 6_02, 6_76, 6_14], [6_88, 6_02, 7_88, 6_15], [8_02, 6_02, 8_43, 6_14], [3_37, 6_21, 5_02, 6_33], [5_16, 6_21, 6_15, 6_37], [6_29, 6_21, 7_74, 6_36], [7_89, 6_21, 8_27, 6_33], [3_37, 6_39, 4_18, 6_52], [4_32, 6_40, 5_71, 6_53], [5_87, 6_39, 7_31, 6_55], [7_43, 6_39, 7_69, 6_52], [7_80, 6_39, 8_41, 6_52], [3_38, 6_58, 4_40, 6_73], [4_55, 6_58, 4_91, 6_70], [5_08, 6_58, 6_02, 6_71], [6_16, 6_58, 6_38, 6_70], [6_54, 6_58, 8_35, 6_74], [3_37, 6_77, 4_29, 6_89], [3_37, 7_14, 4_82, 7_26], [4_95, 7_14, 5_48, 7_26], [5_61, 7_14, 6_83, 7_26], [3_38, 7_70, 4_61, 7_82], [4_74, 7_69, 5_54, 7_85], [4_89, 7_88, 5_62, 8_03], [5_76, 7_88, 6_43, 8_01], [6_56, 7_87, 7_51, 8_04], [7_64, 7_88, 8_44, 8_01], [3_34, 8_25, 4_21, 8_38], [4_30, 8_24, 5_74, 8_38], [5_84, 8_24, 7_23, 8_41], [3_35, 8_44, 4_50, 8_57], [4_64, 8_43, 5_83, 8_60], [6_28, 8_62, 7_55, 8_75], [7_69, 8_61, 8_48, 8_78]]] # noqa: E231 # fmt: on self.assertListEqual(encoding.words , __A ) self.assertListEqual(encoding.boxes , __A ) # with apply_OCR = False UpperCAmelCase_ = LayoutLMvaImageProcessor(apply_ocr=__A ) UpperCAmelCase_ = image_processing(__A , return_tensors="""pt""" ) self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) )
718
from typing import Dict, List, Optional, Union import numpy as np from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy UpperCamelCase__ : List[str] = logging.get_logger(__name__) class _UpperCamelCase ( A_ ): '''simple docstring''' def __init__( self : Union[str, Any] , __lowercase : int , __lowercase : int , __lowercase : float , **__lowercase : Dict ): '''simple docstring''' UpperCAmelCase_ = feature_size UpperCAmelCase_ = sampling_rate UpperCAmelCase_ = padding_value UpperCAmelCase_ = kwargs.pop("""padding_side""" , """right""" ) UpperCAmelCase_ = kwargs.pop("""return_attention_mask""" , __lowercase ) super().__init__(**__lowercase ) def SCREAMING_SNAKE_CASE ( self : List[str] , __lowercase : Union[ BatchFeature, List[BatchFeature], Dict[str, BatchFeature], Dict[str, List[BatchFeature]], List[Dict[str, BatchFeature]], ] , __lowercase : Union[bool, str, PaddingStrategy] = True , __lowercase : Optional[int] = None , __lowercase : bool = False , __lowercase : Optional[int] = None , __lowercase : Optional[bool] = None , __lowercase : Optional[Union[str, TensorType]] = None , ): '''simple docstring''' if isinstance(__lowercase , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ): UpperCAmelCase_ = { key: [example[key] for example in processed_features] for key in processed_features[0].keys() } # The model's main input name, usually `input_values`, has be passed for padding if self.model_input_names[0] not in processed_features: raise ValueError( """You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`""" F""" to this method that includes {self.model_input_names[0]}, but you provided""" F""" {list(processed_features.keys() )}""" ) UpperCAmelCase_ = processed_features[self.model_input_names[0]] UpperCAmelCase_ = ( return_attention_mask if return_attention_mask is not None else self.return_attention_mask ) if len(__lowercase ) == 0: if return_attention_mask: UpperCAmelCase_ = [] return processed_features # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays # and rebuild them afterwards if no return_tensors is specified # Note that we lose the specific device the tensor may be on for PyTorch UpperCAmelCase_ = required_input[0] if isinstance(__lowercase , (list, tuple) ): # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element. UpperCAmelCase_ = 0 while len(required_input[index] ) == 0: index += 1 if index < len(__lowercase ): UpperCAmelCase_ = required_input[index][0] if return_tensors is None: if is_tf_tensor(__lowercase ): UpperCAmelCase_ = """tf""" elif is_torch_tensor(__lowercase ): UpperCAmelCase_ = """pt""" elif isinstance(__lowercase , (int, float, list, tuple, np.ndarray) ): UpperCAmelCase_ = """np""" else: raise ValueError( F"""type of {first_element} unknown: {type(__lowercase )}. """ """Should be one of a python, numpy, pytorch or tensorflow object.""" ) for key, value in processed_features.items(): if isinstance(value[0] , (int, float) ): UpperCAmelCase_ = to_numpy(__lowercase ) else: UpperCAmelCase_ = [to_numpy(__lowercase ) for v in value] # Convert padding_strategy in PaddingStrategy UpperCAmelCase_ = self._get_padding_strategies(padding=__lowercase , max_length=__lowercase ) UpperCAmelCase_ = processed_features[self.model_input_names[0]] UpperCAmelCase_ = len(__lowercase ) if not all(len(__lowercase ) == batch_size for v in processed_features.values() ): raise ValueError("""Some items in the output dictionary have a different batch size than others.""" ) UpperCAmelCase_ = [] for i in range(__lowercase ): UpperCAmelCase_ = {k: v[i] for k, v in processed_features.items()} # truncation UpperCAmelCase_ = self._truncate( __lowercase , max_length=__lowercase , pad_to_multiple_of=__lowercase , truncation=__lowercase , ) truncated_inputs.append(__lowercase ) if padding_strategy == PaddingStrategy.LONGEST: # make sure that `max_length` cannot be longer than the longest truncated length UpperCAmelCase_ = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs ) UpperCAmelCase_ = PaddingStrategy.MAX_LENGTH UpperCAmelCase_ = {} for i in range(__lowercase ): # padding UpperCAmelCase_ = self._pad( truncated_inputs[i] , max_length=__lowercase , padding_strategy=__lowercase , pad_to_multiple_of=__lowercase , return_attention_mask=__lowercase , ) for key, value in outputs.items(): if key not in batch_outputs: UpperCAmelCase_ = [] if value.dtype is np.dtype(np.floataa ): UpperCAmelCase_ = value.astype(np.floataa ) batch_outputs[key].append(__lowercase ) return BatchFeature(__lowercase , tensor_type=__lowercase ) def SCREAMING_SNAKE_CASE ( self : str , __lowercase : Union[Dict[str, np.ndarray], BatchFeature] , __lowercase : Optional[int] = None , __lowercase : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , __lowercase : Optional[int] = None , __lowercase : Optional[bool] = None , ): '''simple docstring''' UpperCAmelCase_ = processed_features[self.model_input_names[0]] if padding_strategy == PaddingStrategy.LONGEST: UpperCAmelCase_ = len(__lowercase ) if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): UpperCAmelCase_ = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of UpperCAmelCase_ = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(__lowercase ) < max_length if return_attention_mask and "attention_mask" not in processed_features: UpperCAmelCase_ = np.ones(len(__lowercase ) , dtype=np.intaa ) if needs_to_be_padded: UpperCAmelCase_ = max_length - len(__lowercase ) if self.padding_side == "right": if return_attention_mask: UpperCAmelCase_ = np.pad( processed_features["""attention_mask"""] , (0, difference) ) UpperCAmelCase_ = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference) UpperCAmelCase_ = np.pad( __lowercase , __lowercase , """constant""" , constant_values=self.padding_value ) elif self.padding_side == "left": if return_attention_mask: UpperCAmelCase_ = np.pad( processed_features["""attention_mask"""] , (difference, 0) ) UpperCAmelCase_ = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0) UpperCAmelCase_ = np.pad( __lowercase , __lowercase , """constant""" , constant_values=self.padding_value ) else: raise ValueError("""Invalid padding strategy:""" + str(self.padding_side ) ) return processed_features def SCREAMING_SNAKE_CASE ( self : Optional[int] , __lowercase : Union[Dict[str, np.ndarray], BatchFeature] , __lowercase : Optional[int] = None , __lowercase : Optional[int] = None , __lowercase : Optional[bool] = None , ): '''simple docstring''' if not truncation: return processed_features elif truncation and max_length is None: raise ValueError("""When setting ``truncation=True``, make sure that ``max_length`` is defined.""" ) UpperCAmelCase_ = processed_features[self.model_input_names[0]] # find `max_length` that fits `pad_to_multiple_of` if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): UpperCAmelCase_ = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of UpperCAmelCase_ = len(__lowercase ) > max_length if needs_to_be_truncated: UpperCAmelCase_ = processed_features[self.model_input_names[0]][:max_length] if "attention_mask" in processed_features: UpperCAmelCase_ = processed_features["""attention_mask"""][:max_length] return processed_features def SCREAMING_SNAKE_CASE ( self : List[Any] , __lowercase : int=False , __lowercase : List[str]=None ): '''simple docstring''' if padding is not False: if padding is True: UpperCAmelCase_ = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch elif not isinstance(__lowercase , __lowercase ): UpperCAmelCase_ = PaddingStrategy(__lowercase ) elif isinstance(__lowercase , __lowercase ): UpperCAmelCase_ = padding else: UpperCAmelCase_ = PaddingStrategy.DO_NOT_PAD # Set max length if needed if max_length is None: if padding_strategy == PaddingStrategy.MAX_LENGTH: raise ValueError( F"""When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined""" ) # Test if we have a padding value if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None): raise ValueError( """Asking to pad but the feature_extractor does not have a padding value. Please select a value to use""" """ as `padding_value`. For example: `feature_extractor.padding_value = 0.0`.""" ) return padding_strategy
486
0
import os import sys import unittest __lowerCAmelCase = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, "utils")) import check_dummies # noqa: E402 from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402 # Align TRANSFORMERS_PATH in check_dummies with the current path __lowerCAmelCase = os.path.join(git_repo_path, "src", "diffusers") class __SCREAMING_SNAKE_CASE ( unittest.TestCase): def UpperCAmelCase__ ( self : Any ): _UpperCAmelCase = find_backend(" if not is_torch_available():" ) self.assertEqual(UpperCamelCase_ , "torch" ) # backend_with_underscore = find_backend(" if not is_tensorflow_text_available():") # self.assertEqual(backend_with_underscore, "tensorflow_text") _UpperCAmelCase = find_backend(" if not (is_torch_available() and is_transformers_available()):" ) self.assertEqual(UpperCamelCase_ , "torch_and_transformers" ) # double_backend_with_underscore = find_backend( # " if not (is_sentencepiece_available() and is_tensorflow_text_available()):" # ) # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text") _UpperCAmelCase = find_backend( " if not (is_torch_available() and is_transformers_available() and is_onnx_available()):" ) self.assertEqual(UpperCamelCase_ , "torch_and_transformers_and_onnx" ) def UpperCAmelCase__ ( self : Dict ): _UpperCAmelCase = read_init() # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects self.assertIn("torch" , UpperCamelCase_ ) self.assertIn("torch_and_transformers" , UpperCamelCase_ ) self.assertIn("flax_and_transformers" , UpperCamelCase_ ) self.assertIn("torch_and_transformers_and_onnx" , UpperCamelCase_ ) # Likewise, we can't assert on the exact content of a key self.assertIn("UNet2DModel" , objects["torch"] ) self.assertIn("FlaxUNet2DConditionModel" , objects["flax"] ) self.assertIn("StableDiffusionPipeline" , objects["torch_and_transformers"] ) self.assertIn("FlaxStableDiffusionPipeline" , objects["flax_and_transformers"] ) self.assertIn("LMSDiscreteScheduler" , objects["torch_and_scipy"] ) self.assertIn("OnnxStableDiffusionPipeline" , objects["torch_and_transformers_and_onnx"] ) def UpperCAmelCase__ ( self : Dict ): _UpperCAmelCase = create_dummy_object("CONSTANT" , "\'torch\'" ) self.assertEqual(UpperCamelCase_ , "\nCONSTANT = None\n" ) _UpperCAmelCase = create_dummy_object("function" , "\'torch\'" ) self.assertEqual( UpperCamelCase_ , "\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n" ) _UpperCAmelCase = "\nclass FakeClass(metaclass=DummyObject):\n _backends = \'torch\'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, \'torch\')\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, \'torch\')\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, \'torch\')\n" _UpperCAmelCase = create_dummy_object("FakeClass" , "\'torch\'" ) self.assertEqual(UpperCamelCase_ , UpperCamelCase_ ) def UpperCAmelCase__ ( self : int ): _UpperCAmelCase = "# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, [\"torch\"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = [\"torch\"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, [\"torch\"])\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, [\"torch\"])\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, [\"torch\"])\n" _UpperCAmelCase = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]} ) self.assertEqual(dummy_files["torch"] , UpperCamelCase_ )
684
'''simple docstring''' from __future__ import annotations a__ : Optional[int] = list[tuple[int, int]] a__ : List[Any] = [ [0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0], [1, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0], ] a__ : Optional[int] = ([-1, 0], [0, -1], [1, 0], [0, 1]) # up, left, down, right class __snake_case : def __init__( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , ) -> Any: snake_case__ = pos_x snake_case__ = pos_y snake_case__ = (pos_y, pos_x) snake_case__ = goal_x snake_case__ = goal_y snake_case__ = g_cost snake_case__ = parent snake_case__ = self.calculate_heuristic() def _snake_case ( self ) -> float: snake_case__ = abs(self.pos_x - self.goal_x ) snake_case__ = abs(self.pos_y - self.goal_y ) return dx + dy def __lt__( self , UpperCamelCase_ ) -> bool: return self.f_cost < other.f_cost class __snake_case : def __init__( self , UpperCamelCase_ , UpperCamelCase_ ) -> Any: snake_case__ = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , UpperCamelCase_ ) snake_case__ = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_9999 , UpperCamelCase_ ) snake_case__ = [self.start] snake_case__ = [] snake_case__ = False def _snake_case ( self ) -> Path | None: while self.open_nodes: # Open Nodes are sorted using __lt__ self.open_nodes.sort() snake_case__ = self.open_nodes.pop(0 ) if current_node.pos == self.target.pos: snake_case__ = True return self.retrace_path(UpperCamelCase_ ) self.closed_nodes.append(UpperCamelCase_ ) snake_case__ = self.get_successors(UpperCamelCase_ ) for child_node in successors: if child_node in self.closed_nodes: continue if child_node not in self.open_nodes: self.open_nodes.append(UpperCamelCase_ ) else: # retrieve the best current path snake_case__ = self.open_nodes.pop(self.open_nodes.index(UpperCamelCase_ ) ) if child_node.g_cost < better_node.g_cost: self.open_nodes.append(UpperCamelCase_ ) else: self.open_nodes.append(UpperCamelCase_ ) if not self.reached: return [self.start.pos] return None def _snake_case ( self , UpperCamelCase_ ) -> list[Node]: snake_case__ = [] for action in delta: snake_case__ = parent.pos_x + action[1] snake_case__ = parent.pos_y + action[0] if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(UpperCamelCase_ ) - 1): continue if grid[pos_y][pos_x] != 0: continue successors.append( Node( UpperCamelCase_ , UpperCamelCase_ , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , UpperCamelCase_ , ) ) return successors def _snake_case ( self , UpperCamelCase_ ) -> Path: snake_case__ = node snake_case__ = [] while current_node is not None: path.append((current_node.pos_y, current_node.pos_x) ) snake_case__ = current_node.parent path.reverse() return path if __name__ == "__main__": a__ : List[str] = (0, 0) a__ : Dict = (len(grid) - 1, len(grid[0]) - 1) for elem in grid: print(elem) print('''------''') a__ : Optional[int] = GreedyBestFirst(init, goal) a__ : Optional[int] = greedy_bf.search() if path: for pos_x, pos_y in path: a__ : Tuple = 2 for elem in grid: print(elem)
368
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available snake_case_ : Dict = { """configuration_xlm""": ["""XLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLMConfig""", """XLMOnnxConfig"""], """tokenization_xlm""": ["""XLMTokenizer"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case_ : Tuple = [ """XLM_PRETRAINED_MODEL_ARCHIVE_LIST""", """XLMForMultipleChoice""", """XLMForQuestionAnswering""", """XLMForQuestionAnsweringSimple""", """XLMForSequenceClassification""", """XLMForTokenClassification""", """XLMModel""", """XLMPreTrainedModel""", """XLMWithLMHeadModel""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case_ : Dict = [ """TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFXLMForMultipleChoice""", """TFXLMForQuestionAnsweringSimple""", """TFXLMForSequenceClassification""", """TFXLMForTokenClassification""", """TFXLMMainLayer""", """TFXLMModel""", """TFXLMPreTrainedModel""", """TFXLMWithLMHeadModel""", ] if TYPE_CHECKING: from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig from .tokenization_xlm import XLMTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xlm import ( XLM_PRETRAINED_MODEL_ARCHIVE_LIST, XLMForMultipleChoice, XLMForQuestionAnswering, XLMForQuestionAnsweringSimple, XLMForSequenceClassification, XLMForTokenClassification, XLMModel, XLMPreTrainedModel, XLMWithLMHeadModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xlm import ( TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXLMForMultipleChoice, TFXLMForQuestionAnsweringSimple, TFXLMForSequenceClassification, TFXLMForTokenClassification, TFXLMMainLayer, TFXLMModel, TFXLMPreTrainedModel, TFXLMWithLMHeadModel, ) else: import sys snake_case_ : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
701
'''simple docstring''' import numpy as np import pandas as pd from sklearn.preprocessing import MinMaxScaler from tensorflow.keras.layers import LSTM, Dense from tensorflow.keras.models import Sequential if __name__ == "__main__": snake_case_ : List[Any] = pd.read_csv("sample_data.csv", header=None) snake_case_ : Optional[Any] = df.shape[:1][0] # If you're using some other dataset input the target column snake_case_ : Any = df.iloc[:, 1:2] snake_case_ : str = actual_data.values.reshape(len_data, 1) snake_case_ : Optional[Any] = MinMaxScaler().fit_transform(actual_data) snake_case_ : List[str] = 10 snake_case_ : Any = 5 snake_case_ : Any = 20 snake_case_ : Tuple = len_data - periods * look_back snake_case_ : str = actual_data[:division] snake_case_ : Optional[int] = actual_data[division - look_back :] snake_case_ ,snake_case_ : Any = [], [] snake_case_ ,snake_case_ : Union[str, Any] = [], [] for i in range(0, len(train_data) - forward_days - look_back + 1): train_x.append(train_data[i : i + look_back]) train_y.append(train_data[i + look_back : i + look_back + forward_days]) for i in range(0, len(test_data) - forward_days - look_back + 1): test_x.append(test_data[i : i + look_back]) test_y.append(test_data[i + look_back : i + look_back + forward_days]) snake_case_ : Any = np.array(train_x) snake_case_ : Optional[Any] = np.array(test_x) snake_case_ : Optional[Any] = np.array([list(i.ravel()) for i in train_y]) snake_case_ : List[str] = np.array([list(i.ravel()) for i in test_y]) snake_case_ : List[Any] = Sequential() model.add(LSTM(1_28, input_shape=(look_back, 1), return_sequences=True)) model.add(LSTM(64, input_shape=(1_28, 1))) model.add(Dense(forward_days)) model.compile(loss="mean_squared_error", optimizer="adam") snake_case_ : Dict = model.fit( x_train, y_train, epochs=1_50, verbose=1, shuffle=True, batch_size=4 ) snake_case_ : Optional[Any] = model.predict(x_test)
644
0
"""simple docstring""" from . import __version__ # Backward compatibility imports, to make sure all those objects can be found in file_utils from .utils import ( CLOUDFRONT_DISTRIB_PREFIX, CONFIG_NAME, DISABLE_TELEMETRY, DUMMY_INPUTS, DUMMY_MASK, ENV_VARS_TRUE_AND_AUTO_VALUES, ENV_VARS_TRUE_VALUES, FEATURE_EXTRACTOR_NAME, FLAX_WEIGHTS_NAME, HF_MODULES_CACHE, HUGGINGFACE_CO_PREFIX, HUGGINGFACE_CO_RESOLVE_ENDPOINT, MODEL_CARD_NAME, MULTIPLE_CHOICE_DUMMY_INPUTS, PYTORCH_PRETRAINED_BERT_CACHE, PYTORCH_TRANSFORMERS_CACHE, S3_BUCKET_PREFIX, SENTENCEPIECE_UNDERLINE, SPIECE_UNDERLINE, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME, TORCH_FX_REQUIRED_VERSION, TRANSFORMERS_CACHE, TRANSFORMERS_DYNAMIC_MODULE_NAME, USE_JAX, USE_TF, USE_TORCH, WEIGHTS_INDEX_NAME, WEIGHTS_NAME, ContextManagers, DummyObject, EntryNotFoundError, ExplicitEnum, ModelOutput, PaddingStrategy, PushToHubMixin, RepositoryNotFoundError, RevisionNotFoundError, TensorType, _LazyModule, add_code_sample_docstrings, add_end_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, cached_property, copy_func, default_cache_path, define_sagemaker_information, get_cached_models, get_file_from_repo, get_full_repo_name, get_torch_version, has_file, http_user_agent, is_apex_available, is_bsa_available, is_coloredlogs_available, is_datasets_available, is_detectrona_available, is_faiss_available, is_flax_available, is_ftfy_available, is_in_notebook, is_ipex_available, is_librosa_available, is_offline_mode, is_onnx_available, is_pandas_available, is_phonemizer_available, is_protobuf_available, is_psutil_available, is_pyanvml_available, is_pyctcdecode_available, is_pytesseract_available, is_pytorch_quantization_available, is_rjieba_available, is_sagemaker_dp_enabled, is_sagemaker_mp_enabled, is_scipy_available, is_sentencepiece_available, is_seqio_available, is_sklearn_available, is_soundfile_availble, is_spacy_available, is_speech_available, is_tensor, is_tensorflow_probability_available, is_tfaonnx_available, is_tf_available, is_timm_available, is_tokenizers_available, is_torch_available, is_torch_bfaa_available, is_torch_cuda_available, is_torch_fx_available, is_torch_fx_proxy, is_torch_mps_available, is_torch_tfaa_available, is_torch_tpu_available, is_torchaudio_available, is_training_run_on_sagemaker, is_vision_available, replace_return_docstrings, requires_backends, to_numpy, to_py_obj, torch_only_method, )
231
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available lowerCamelCase :Union[str, Any] = { '''configuration_audio_spectrogram_transformer''': [ '''AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ASTConfig''', ] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase :int = [ '''AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''ASTForAudioClassification''', '''ASTModel''', '''ASTPreTrainedModel''', ] try: if not is_speech_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase :Tuple = ['''ASTFeatureExtractor'''] if TYPE_CHECKING: from .configuration_audio_spectrogram_transformer import ( AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ASTConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_audio_spectrogram_transformer import ( AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ASTForAudioClassification, ASTModel, ASTPreTrainedModel, ) try: if not is_speech_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor else: import sys lowerCamelCase :Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
667
0
import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCamelCase = logging.get_logger(__name__) _lowerCamelCase = { '''BridgeTower/bridgetower-base''': '''https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json''', '''BridgeTower/bridgetower-base-itm-mlm''': ( '''https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json''' ), } class UpperCAmelCase__ ( UpperCAmelCase__ ): '''simple docstring''' _SCREAMING_SNAKE_CASE : List[str] = "bridgetower_vision_model" def __init__( self , _lowerCAmelCase=768 , _lowerCAmelCase=12 , _lowerCAmelCase=3 , _lowerCAmelCase=16 , _lowerCAmelCase=288 , _lowerCAmelCase=1 , _lowerCAmelCase=1E-05 , _lowerCAmelCase=False , _lowerCAmelCase=True , _lowerCAmelCase=False , **_lowerCAmelCase , ): super().__init__(**_lowerCAmelCase ) a =hidden_size a =num_hidden_layers a =num_channels a =patch_size a =image_size a =initializer_factor a =layer_norm_eps a =stop_gradient a =share_layernorm a =remove_last_layer @classmethod def lowerCAmelCase__ ( cls , _lowerCAmelCase , **_lowerCAmelCase ): a , a =cls.get_config_dict(_lowerCAmelCase , **_lowerCAmelCase ) if config_dict.get("""model_type""" ) == "bridgetower": a =config_dict["""text_config"""] if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type ''' F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(_lowerCAmelCase , **_lowerCAmelCase ) class UpperCAmelCase__ ( UpperCAmelCase__ ): '''simple docstring''' _SCREAMING_SNAKE_CASE : Optional[int] = "bridgetower_text_model" def __init__( self , _lowerCAmelCase=50_265 , _lowerCAmelCase=768 , _lowerCAmelCase=12 , _lowerCAmelCase=12 , _lowerCAmelCase=1 , _lowerCAmelCase=3_072 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=514 , _lowerCAmelCase=1 , _lowerCAmelCase=1E-05 , _lowerCAmelCase=1 , _lowerCAmelCase=0 , _lowerCAmelCase=2 , _lowerCAmelCase="absolute" , _lowerCAmelCase=True , **_lowerCAmelCase , ): super().__init__(**_lowerCAmelCase ) a =vocab_size a =hidden_size a =num_hidden_layers a =num_attention_heads a =hidden_act a =initializer_factor a =intermediate_size a =hidden_dropout_prob a =attention_probs_dropout_prob a =max_position_embeddings a =type_vocab_size a =layer_norm_eps a =position_embedding_type a =use_cache a =pad_token_id a =bos_token_id a =eos_token_id @classmethod def lowerCAmelCase__ ( cls , _lowerCAmelCase , **_lowerCAmelCase ): a , a =cls.get_config_dict(_lowerCAmelCase , **_lowerCAmelCase ) if config_dict.get("""model_type""" ) == "bridgetower": a =config_dict["""text_config"""] if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type ''' F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(_lowerCAmelCase , **_lowerCAmelCase ) class UpperCAmelCase__ ( UpperCAmelCase__ ): '''simple docstring''' _SCREAMING_SNAKE_CASE : Any = "bridgetower" def __init__( self , _lowerCAmelCase=True , _lowerCAmelCase="gelu" , _lowerCAmelCase=768 , _lowerCAmelCase=1 , _lowerCAmelCase=1E-05 , _lowerCAmelCase=False , _lowerCAmelCase="add" , _lowerCAmelCase=12 , _lowerCAmelCase=6 , _lowerCAmelCase=False , _lowerCAmelCase=False , _lowerCAmelCase=None , _lowerCAmelCase=None , **_lowerCAmelCase , ): # TODO: remove this once the Hub files are updated. a =kwargs.pop("""text_config_dict""" , _lowerCAmelCase ) a =kwargs.pop("""vision_config_dict""" , _lowerCAmelCase ) super().__init__(**_lowerCAmelCase ) a =share_cross_modal_transformer_layers a =hidden_act a =hidden_size a =initializer_factor a =layer_norm_eps a =share_link_tower_layers a =link_tower_type a =num_attention_heads a =num_hidden_layers a =tie_word_embeddings a =init_layernorm_from_vision_encoder if text_config is None: a ={} logger.info("""`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.""" ) if vision_config is None: a ={} logger.info("""`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.""" ) a =BridgeTowerTextConfig(**_lowerCAmelCase ) a =BridgeTowerVisionConfig(**_lowerCAmelCase ) @classmethod def lowerCAmelCase__ ( cls , _lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase ): return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **_lowerCAmelCase ) def lowerCAmelCase__ ( self ): a =copy.deepcopy(self.__dict__ ) a =self.text_config.to_dict() a =self.vision_config.to_dict() a =self.__class__.model_type return output
321
import pytest from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs @pytest.mark.parametrize( """kwargs, expected""" , [ ({"""num_shards""": 0, """max_num_jobs""": 1}, []), ({"""num_shards""": 10, """max_num_jobs""": 1}, [range(10 )]), ({"""num_shards""": 10, """max_num_jobs""": 10}, [range(UpperCAmelCase_ , i + 1 ) for i in range(10 )]), ({"""num_shards""": 1, """max_num_jobs""": 10}, [range(1 )]), ({"""num_shards""": 10, """max_num_jobs""": 3}, [range(0 , 4 ), range(4 , 7 ), range(7 , 10 )]), ({"""num_shards""": 3, """max_num_jobs""": 10}, [range(0 , 1 ), range(1 , 2 ), range(2 , 3 )]), ] , ) def lowerCamelCase ( UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : int )-> int: """simple docstring""" a =_distribute_shards(**UpperCAmelCase_ ) assert out == expected @pytest.mark.parametrize( """gen_kwargs, max_num_jobs, expected""" , [ ({"""foo""": 0}, 10, [{"""foo""": 0}]), ({"""shards""": [0, 1, 2, 3]}, 1, [{"""shards""": [0, 1, 2, 3]}]), ({"""shards""": [0, 1, 2, 3]}, 4, [{"""shards""": [0]}, {"""shards""": [1]}, {"""shards""": [2]}, {"""shards""": [3]}]), ({"""shards""": [0, 1]}, 4, [{"""shards""": [0]}, {"""shards""": [1]}]), ({"""shards""": [0, 1, 2, 3]}, 2, [{"""shards""": [0, 1]}, {"""shards""": [2, 3]}]), ] , ) def lowerCamelCase ( UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Any )-> Any: """simple docstring""" a =_split_gen_kwargs(UpperCAmelCase_ , UpperCAmelCase_ ) assert out == expected @pytest.mark.parametrize( """gen_kwargs, expected""" , [ ({"""foo""": 0}, 1), ({"""shards""": [0]}, 1), ({"""shards""": [0, 1, 2, 3]}, 4), ({"""shards""": [0, 1, 2, 3], """foo""": 0}, 4), ({"""shards""": [0, 1, 2, 3], """other""": (0, 1)}, 4), ({"""shards""": [0, 1, 2, 3], """shards2""": [0, 1]}, RuntimeError), ] , ) def lowerCamelCase ( UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[Any] )-> int: """simple docstring""" if expected is RuntimeError: with pytest.raises(UpperCAmelCase_ ): _number_of_shards_in_gen_kwargs(UpperCAmelCase_ ) else: a =_number_of_shards_in_gen_kwargs(UpperCAmelCase_ ) assert out == expected
321
1
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, is_valid_image, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL UpperCAmelCase__ : Optional[Any] = logging.get_logger(__name__) def A ( snake_case__ : Dict ) -> List[str]: '''simple docstring''' if isinstance(snake_case__ , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ): return videos elif isinstance(snake_case__ , (list, tuple) ) and is_valid_image(videos[0] ): return [videos] elif is_valid_image(snake_case__ ): return [[videos]] raise ValueError(f"Could not make batched video from {videos}" ) class __lowercase ( lowerCamelCase__ ): __UpperCAmelCase = ['''pixel_values'''] def __init__( self , lowercase_ = True , lowercase_ = None , lowercase_ = PILImageResampling.BILINEAR , lowercase_ = True , lowercase_ = None , lowercase_ = True , lowercase_ = 1 / 2_5_5 , lowercase_ = True , lowercase_ = None , lowercase_ = None , **lowercase_ , ) -> None: super().__init__(**_SCREAMING_SNAKE_CASE) __snake_case = size if size is not None else {'''shortest_edge''': 2_2_4} __snake_case = get_size_dict(_SCREAMING_SNAKE_CASE , default_to_square=_SCREAMING_SNAKE_CASE) __snake_case = crop_size if crop_size is not None else {'''height''': 2_2_4, '''width''': 2_2_4} __snake_case = get_size_dict(_SCREAMING_SNAKE_CASE , param_name='crop_size') __snake_case = do_resize __snake_case = size __snake_case = do_center_crop __snake_case = crop_size __snake_case = resample __snake_case = do_rescale __snake_case = rescale_factor __snake_case = do_normalize __snake_case = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN __snake_case = image_std if image_std is not None else IMAGENET_STANDARD_STD def _a ( self , lowercase_ , lowercase_ , lowercase_ = PILImageResampling.BILINEAR , lowercase_ = None , **lowercase_ , ) -> np.ndarray: __snake_case = get_size_dict(_SCREAMING_SNAKE_CASE , default_to_square=_SCREAMING_SNAKE_CASE) if "shortest_edge" in size: __snake_case = get_resize_output_image_size(_SCREAMING_SNAKE_CASE , size['shortest_edge'] , default_to_square=_SCREAMING_SNAKE_CASE) elif "height" in size and "width" in size: __snake_case = (size['''height'''], size['''width''']) else: raise ValueError(F"Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}") return resize(_SCREAMING_SNAKE_CASE , size=_SCREAMING_SNAKE_CASE , resample=_SCREAMING_SNAKE_CASE , data_format=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE) def _a ( self , lowercase_ , lowercase_ , lowercase_ = None , **lowercase_ , ) -> np.ndarray: __snake_case = get_size_dict(_SCREAMING_SNAKE_CASE) if "height" not in size or "width" not in size: raise ValueError(F"Size must have \'height\' and \'width\' as keys. Got {size.keys()}") return center_crop(_SCREAMING_SNAKE_CASE , size=(size['height'], size['width']) , data_format=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE) def _a ( self , lowercase_ , lowercase_ , lowercase_ = None , **lowercase_ , ) -> int: return rescale(_SCREAMING_SNAKE_CASE , scale=_SCREAMING_SNAKE_CASE , data_format=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE) def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ = None , **lowercase_ , ) -> np.ndarray: return normalize(_SCREAMING_SNAKE_CASE , mean=_SCREAMING_SNAKE_CASE , std=_SCREAMING_SNAKE_CASE , data_format=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE) def _a ( self , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = ChannelDimension.FIRST , ) -> np.ndarray: if do_resize and size is None or resample is None: raise ValueError('Size and resample must be specified if do_resize is True.') if do_center_crop and crop_size is None: raise ValueError('Crop size must be specified if do_center_crop is True.') if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.') if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.') # All transformations expect numpy arrays. __snake_case = to_numpy_array(_SCREAMING_SNAKE_CASE) if do_resize: __snake_case = self.resize(image=_SCREAMING_SNAKE_CASE , size=_SCREAMING_SNAKE_CASE , resample=_SCREAMING_SNAKE_CASE) if do_center_crop: __snake_case = self.center_crop(_SCREAMING_SNAKE_CASE , size=_SCREAMING_SNAKE_CASE) if do_rescale: __snake_case = self.rescale(image=_SCREAMING_SNAKE_CASE , scale=_SCREAMING_SNAKE_CASE) if do_normalize: __snake_case = self.normalize(image=_SCREAMING_SNAKE_CASE , mean=_SCREAMING_SNAKE_CASE , std=_SCREAMING_SNAKE_CASE) __snake_case = to_channel_dimension_format(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE) return image def _a ( self , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = ChannelDimension.FIRST , **lowercase_ , ) -> PIL.Image.Image: __snake_case = do_resize if do_resize is not None else self.do_resize __snake_case = resample if resample is not None else self.resample __snake_case = do_center_crop if do_center_crop is not None else self.do_center_crop __snake_case = do_rescale if do_rescale is not None else self.do_rescale __snake_case = rescale_factor if rescale_factor is not None else self.rescale_factor __snake_case = do_normalize if do_normalize is not None else self.do_normalize __snake_case = image_mean if image_mean is not None else self.image_mean __snake_case = image_std if image_std is not None else self.image_std __snake_case = size if size is not None else self.size __snake_case = get_size_dict(_SCREAMING_SNAKE_CASE , default_to_square=_SCREAMING_SNAKE_CASE) __snake_case = crop_size if crop_size is not None else self.crop_size __snake_case = get_size_dict(_SCREAMING_SNAKE_CASE , param_name='crop_size') if not valid_images(_SCREAMING_SNAKE_CASE): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.') __snake_case = make_batched(_SCREAMING_SNAKE_CASE) __snake_case = [ [ self._preprocess_image( image=_SCREAMING_SNAKE_CASE , do_resize=_SCREAMING_SNAKE_CASE , size=_SCREAMING_SNAKE_CASE , resample=_SCREAMING_SNAKE_CASE , do_center_crop=_SCREAMING_SNAKE_CASE , crop_size=_SCREAMING_SNAKE_CASE , do_rescale=_SCREAMING_SNAKE_CASE , rescale_factor=_SCREAMING_SNAKE_CASE , do_normalize=_SCREAMING_SNAKE_CASE , image_mean=_SCREAMING_SNAKE_CASE , image_std=_SCREAMING_SNAKE_CASE , data_format=_SCREAMING_SNAKE_CASE , ) for img in video ] for video in videos ] __snake_case = {'''pixel_values''': videos} return BatchFeature(data=_SCREAMING_SNAKE_CASE , tensor_type=_SCREAMING_SNAKE_CASE)
313
import gc import unittest from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline from diffusers.utils import is_flax_available, load_image, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class _lowerCamelCase ( unittest.TestCase ): """simple docstring""" def _snake_case ( self )->Dict: '''simple docstring''' super().tearDown() gc.collect() def _snake_case ( self )->Optional[int]: '''simple docstring''' A_ , A_ : List[Any] = FlaxControlNetModel.from_pretrained( '''lllyasviel/sd-controlnet-canny''' , from_pt=_SCREAMING_SNAKE_CASE , dtype=jnp.bfloataa ) A_ , A_ : int = FlaxStableDiffusionControlNetPipeline.from_pretrained( '''runwayml/stable-diffusion-v1-5''' , controlnet=_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE , dtype=jnp.bfloataa ) A_ : int = controlnet_params A_ : Union[str, Any] = '''bird''' A_ : Any = jax.device_count() A_ : Optional[Any] = pipe.prepare_text_inputs([prompts] * num_samples ) A_ : Union[str, Any] = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png''' ) A_ : Union[str, Any] = pipe.prepare_image_inputs([canny_image] * num_samples ) A_ : Dict = jax.random.PRNGKey(0 ) A_ : str = jax.random.split(_SCREAMING_SNAKE_CASE , jax.device_count() ) A_ : Optional[Any] = replicate(_SCREAMING_SNAKE_CASE ) A_ : List[Any] = shard(_SCREAMING_SNAKE_CASE ) A_ : Dict = shard(_SCREAMING_SNAKE_CASE ) A_ : Any = pipe( prompt_ids=_SCREAMING_SNAKE_CASE , image=_SCREAMING_SNAKE_CASE , params=_SCREAMING_SNAKE_CASE , prng_seed=_SCREAMING_SNAKE_CASE , num_inference_steps=50 , jit=_SCREAMING_SNAKE_CASE , ).images assert images.shape == (jax.device_count(), 1, 768, 512, 3) A_ : List[str] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) A_ : List[Any] = images[0, 253:256, 253:256, -1] A_ : Tuple = jnp.asarray(jax.device_get(image_slice.flatten() ) ) A_ : Union[str, Any] = jnp.array( [0.1_6_7_9_6_9, 0.1_1_6_6_9_9, 0.0_8_1_5_4_3, 0.1_5_4_2_9_7, 0.1_3_2_8_1_2, 0.1_0_8_8_8_7, 0.1_6_9_9_2_2, 0.1_6_9_9_2_2, 0.2_0_5_0_7_8] ) print(F'''output_slice: {output_slice}''' ) assert jnp.abs(output_slice - expected_slice ).max() < 1e-2 def _snake_case ( self )->List[str]: '''simple docstring''' A_ , A_ : Optional[int] = FlaxControlNetModel.from_pretrained( '''lllyasviel/sd-controlnet-openpose''' , from_pt=_SCREAMING_SNAKE_CASE , dtype=jnp.bfloataa ) A_ , A_ : Optional[int] = FlaxStableDiffusionControlNetPipeline.from_pretrained( '''runwayml/stable-diffusion-v1-5''' , controlnet=_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE , dtype=jnp.bfloataa ) A_ : str = controlnet_params A_ : Tuple = '''Chef in the kitchen''' A_ : List[str] = jax.device_count() A_ : Tuple = pipe.prepare_text_inputs([prompts] * num_samples ) A_ : Tuple = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png''' ) A_ : List[Any] = pipe.prepare_image_inputs([pose_image] * num_samples ) A_ : List[str] = jax.random.PRNGKey(0 ) A_ : str = jax.random.split(_SCREAMING_SNAKE_CASE , jax.device_count() ) A_ : Dict = replicate(_SCREAMING_SNAKE_CASE ) A_ : List[Any] = shard(_SCREAMING_SNAKE_CASE ) A_ : Any = shard(_SCREAMING_SNAKE_CASE ) A_ : List[str] = pipe( prompt_ids=_SCREAMING_SNAKE_CASE , image=_SCREAMING_SNAKE_CASE , params=_SCREAMING_SNAKE_CASE , prng_seed=_SCREAMING_SNAKE_CASE , num_inference_steps=50 , jit=_SCREAMING_SNAKE_CASE , ).images assert images.shape == (jax.device_count(), 1, 768, 512, 3) A_ : Union[str, Any] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) A_ : int = images[0, 253:256, 253:256, -1] A_ : Any = jnp.asarray(jax.device_get(image_slice.flatten() ) ) A_ : Optional[Any] = jnp.array( [[0.2_7_1_4_8_4, 0.2_6_1_7_1_9, 0.2_7_5_3_9_1, 0.2_7_7_3_4_4, 0.2_7_9_2_9_7, 0.2_9_1_0_1_6, 0.2_9_4_9_2_2, 0.3_0_2_7_3_4, 0.3_0_2_7_3_4]] ) print(F'''output_slice: {output_slice}''' ) assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
590
0
import itertools import json import linecache import os import pickle import re import socket import string from collections import Counter from logging import getLogger from pathlib import Path from typing import Callable, Dict, Iterable, List import git import torch from torch.utils.data import Dataset from transformers import BartTokenizer, RagTokenizer, TaTokenizer def snake_case__ ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=True , lowerCamelCase_="pt" ): A : int = {'''add_prefix_space''': True} if isinstance(lowerCamelCase_ , lowerCamelCase_ ) and not line.startswith(''' ''' ) else {} A : Union[str, Any] = padding_side return tokenizer( [line] , max_length=lowerCamelCase_ , padding='''max_length''' if pad_to_max_length else None , truncation=lowerCamelCase_ , return_tensors=lowerCamelCase_ , add_special_tokens=lowerCamelCase_ , **lowerCamelCase_ , ) def snake_case__ ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=None , ): A : Tuple = input_ids.ne(lowerCamelCase_ ).any(dim=0 ) if attention_mask is None: return input_ids[:, keep_column_mask] else: return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask]) class __lowercase ( _SCREAMING_SNAKE_CASE ): """simple docstring""" def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase="train" , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase="" , ) -> List[str]: super().__init__() A : List[str] = Path(__UpperCAmelCase ).joinpath(type_path + '''.source''' ) A : str = Path(__UpperCAmelCase ).joinpath(type_path + '''.target''' ) A : List[str] = self.get_char_lens(self.src_file ) A : Any = max_source_length A : str = max_target_length assert min(self.src_lens ) > 0, f'found empty line in {self.src_file}' A : Dict = tokenizer A : Tuple = prefix if n_obs is not None: A : Union[str, Any] = self.src_lens[:n_obs] A : Optional[Any] = src_lang A : Any = tgt_lang def __len__( self ) -> Dict: return len(self.src_lens ) def __getitem__( self , __UpperCAmelCase ) -> Dict[str, torch.Tensor]: A : List[str] = index + 1 # linecache starts at 1 A : List[str] = self.prefix + linecache.getline(str(self.src_file ) , __UpperCAmelCase ).rstrip('''\n''' ) A : Dict = linecache.getline(str(self.tgt_file ) , __UpperCAmelCase ).rstrip('''\n''' ) assert source_line, f'empty source line for index {index}' assert tgt_line, f'empty tgt line for index {index}' # Need to add eos token manually for T5 if isinstance(self.tokenizer , __UpperCAmelCase ): source_line += self.tokenizer.eos_token tgt_line += self.tokenizer.eos_token # Pad source and target to the right A : Optional[Any] = ( self.tokenizer.question_encoder if isinstance(self.tokenizer , __UpperCAmelCase ) else self.tokenizer ) A : Any = self.tokenizer.generator if isinstance(self.tokenizer , __UpperCAmelCase ) else self.tokenizer A : List[str] = encode_line(__UpperCAmelCase , __UpperCAmelCase , self.max_source_length , '''right''' ) A : str = encode_line(__UpperCAmelCase , __UpperCAmelCase , self.max_target_length , '''right''' ) A : int = source_inputs['''input_ids'''].squeeze() A : Any = target_inputs['''input_ids'''].squeeze() A : Union[str, Any] = source_inputs['''attention_mask'''].squeeze() return { "input_ids": source_ids, "attention_mask": src_mask, "decoder_input_ids": target_ids, } @staticmethod def snake_case ( __UpperCAmelCase ) -> Union[str, Any]: return [len(__UpperCAmelCase ) for x in Path(__UpperCAmelCase ).open().readlines()] def snake_case ( self , __UpperCAmelCase ) -> Dict[str, torch.Tensor]: A : Tuple = torch.stack([x['''input_ids'''] for x in batch] ) A : Dict = torch.stack([x['''attention_mask'''] for x in batch] ) A : Dict = torch.stack([x['''decoder_input_ids'''] for x in batch] ) A : List[Any] = ( self.tokenizer.generator.pad_token_id if isinstance(self.tokenizer , __UpperCAmelCase ) else self.tokenizer.pad_token_id ) A : Optional[int] = ( self.tokenizer.question_encoder.pad_token_id if isinstance(self.tokenizer , __UpperCAmelCase ) else self.tokenizer.pad_token_id ) A : Optional[int] = trim_batch(__UpperCAmelCase , __UpperCAmelCase ) A , A : Union[str, Any] = trim_batch(__UpperCAmelCase , __UpperCAmelCase , attention_mask=__UpperCAmelCase ) A : Any = { '''input_ids''': source_ids, '''attention_mask''': source_mask, '''decoder_input_ids''': y, } return batch lowercase : Optional[Any] = getLogger(__name__) def snake_case__ ( lowerCamelCase_ ): return list(itertools.chain.from_iterable(lowerCamelCase_ ) ) def snake_case__ ( lowerCamelCase_ ): A : Optional[Any] = get_git_info() save_json(lowerCamelCase_ , os.path.join(lowerCamelCase_ , '''git_log.json''' ) ) def snake_case__ ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=4 , **lowerCamelCase_ ): with open(lowerCamelCase_ , '''w''' ) as f: json.dump(lowerCamelCase_ , lowerCamelCase_ , indent=lowerCamelCase_ , **lowerCamelCase_ ) def snake_case__ ( lowerCamelCase_ ): with open(lowerCamelCase_ ) as f: return json.load(lowerCamelCase_ ) def snake_case__ ( ): A : Any = git.Repo(search_parent_directories=lowerCamelCase_ ) A : Dict = { '''repo_id''': str(lowerCamelCase_ ), '''repo_sha''': str(repo.head.object.hexsha ), '''repo_branch''': str(repo.active_branch ), '''hostname''': str(socket.gethostname() ), } return repo_infos def snake_case__ ( lowerCamelCase_ , lowerCamelCase_ ): return list(map(lowerCamelCase_ , lowerCamelCase_ ) ) def snake_case__ ( lowerCamelCase_ , lowerCamelCase_ ): with open(lowerCamelCase_ , '''wb''' ) as f: return pickle.dump(lowerCamelCase_ , lowerCamelCase_ ) def snake_case__ ( lowerCamelCase_ ): def remove_articles(lowerCamelCase_ ): return re.sub(r'''\b(a|an|the)\b''' , ''' ''' , lowerCamelCase_ ) def white_space_fix(lowerCamelCase_ ): return " ".join(text.split() ) def remove_punc(lowerCamelCase_ ): A : Any = set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(lowerCamelCase_ ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(lowerCamelCase_ ) ) ) ) def snake_case__ ( lowerCamelCase_ , lowerCamelCase_ ): A : List[str] = normalize_answer(lowerCamelCase_ ).split() A : str = normalize_answer(lowerCamelCase_ ).split() A : List[str] = Counter(lowerCamelCase_ ) & Counter(lowerCamelCase_ ) A : Union[str, Any] = sum(common.values() ) if num_same == 0: return 0 A : Optional[Any] = 1.0 * num_same / len(lowerCamelCase_ ) A : Optional[int] = 1.0 * num_same / len(lowerCamelCase_ ) A : Union[str, Any] = (2 * precision * recall) / (precision + recall) return fa def snake_case__ ( lowerCamelCase_ , lowerCamelCase_ ): return normalize_answer(lowerCamelCase_ ) == normalize_answer(lowerCamelCase_ ) def snake_case__ ( lowerCamelCase_ , lowerCamelCase_ ): assert len(lowerCamelCase_ ) == len(lowerCamelCase_ ) A : int = 0 for hypo, pred in zip(lowerCamelCase_ , lowerCamelCase_ ): em += exact_match_score(lowerCamelCase_ , lowerCamelCase_ ) if len(lowerCamelCase_ ) > 0: em /= len(lowerCamelCase_ ) return {"em": em} def snake_case__ ( lowerCamelCase_ ): return model_prefix.startswith('''rag''' ) def snake_case__ ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ): A : Any = {p: p for p in extra_params} # T5 models don't have `dropout` param, they have `dropout_rate` instead A : str = '''dropout_rate''' for p in extra_params: if getattr(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ): if not hasattr(lowerCamelCase_ , lowerCamelCase_ ) and not hasattr(lowerCamelCase_ , equivalent_param[p] ): logger.info('''config doesn\'t have a `{}` attribute'''.format(lowerCamelCase_ ) ) delattr(lowerCamelCase_ , lowerCamelCase_ ) continue A : str = p if hasattr(lowerCamelCase_ , lowerCamelCase_ ) else equivalent_param[p] setattr(lowerCamelCase_ , lowerCamelCase_ , getattr(lowerCamelCase_ , lowerCamelCase_ ) ) delattr(lowerCamelCase_ , lowerCamelCase_ ) return hparams, config
423
class __lowercase : """simple docstring""" def __init__( self ) -> Optional[Any]: A : Tuple = {} def snake_case ( self ) -> None: print(self.vertex ) for i in self.vertex: print(__UpperCAmelCase , ''' -> ''' , ''' -> '''.join([str(__UpperCAmelCase ) for j in self.vertex[i]] ) ) def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase ) -> None: # check if vertex is already present, if from_vertex in self.vertex: self.vertex[from_vertex].append(__UpperCAmelCase ) else: # else make a new vertex A : str = [to_vertex] def snake_case ( self ) -> None: # visited array for storing already visited nodes A : int = [False] * len(self.vertex ) # call the recursive helper function for i in range(len(self.vertex ) ): if not visited[i]: self.dfs_recursive(__UpperCAmelCase , __UpperCAmelCase ) def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase ) -> None: # mark start vertex as visited A : List[Any] = True print(__UpperCAmelCase , end=''' ''' ) # Recur for all the vertices that are adjacent to this node for i in self.vertex: if not visited[i]: self.dfs_recursive(__UpperCAmelCase , __UpperCAmelCase ) if __name__ == "__main__": lowercase : Dict = Graph() g.add_edge(0, 1) g.add_edge(0, 2) g.add_edge(1, 2) g.add_edge(2, 0) g.add_edge(2, 3) g.add_edge(3, 3) g.print_graph() print("DFS:") g.dfs() # OUTPUT: # 0 -> 1 -> 2 # 1 -> 2 # 2 -> 0 -> 3 # 3 -> 3 # DFS: # 0 1 2 3
423
1
import sys from collections import defaultdict class __UpperCAmelCase : """simple docstring""" def __init__( self ) -> Optional[Any]: """simple docstring""" UpperCamelCase = [] def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Optional[int]: """simple docstring""" return self.node_position[vertex] def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Any: """simple docstring""" UpperCamelCase = pos def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> str: """simple docstring""" if start > size // 2 - 1: return else: if 2 * start + 2 >= size: UpperCamelCase = 2 * start + 1 else: if heap[2 * start + 1] < heap[2 * start + 2]: UpperCamelCase = 2 * start + 1 else: UpperCamelCase = 2 * start + 2 if heap[smallest_child] < heap[start]: UpperCamelCase , UpperCamelCase = heap[smallest_child], positions[smallest_child] UpperCamelCase , UpperCamelCase = ( heap[start], positions[start], ) UpperCamelCase , UpperCamelCase = temp, tempa UpperCamelCase = self.get_position(positions[smallest_child] ) self.set_position( positions[smallest_child] , self.get_position(positions[start] ) ) self.set_position(positions[start] , SCREAMING_SNAKE_CASE ) self.top_to_bottom(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[str]: """simple docstring""" UpperCamelCase = position[index] while index != 0: UpperCamelCase = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 ) if val < heap[parent]: UpperCamelCase = heap[parent] UpperCamelCase = position[parent] self.set_position(position[parent] , SCREAMING_SNAKE_CASE ) else: UpperCamelCase = val UpperCamelCase = temp self.set_position(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) break UpperCamelCase = parent else: UpperCamelCase = val UpperCamelCase = temp self.set_position(SCREAMING_SNAKE_CASE , 0 ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[str]: """simple docstring""" UpperCamelCase = len(SCREAMING_SNAKE_CASE ) // 2 - 1 for i in range(SCREAMING_SNAKE_CASE , -1 , -1 ): self.top_to_bottom(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , len(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Any: """simple docstring""" UpperCamelCase = positions[0] UpperCamelCase = sys.maxsize self.top_to_bottom(SCREAMING_SNAKE_CASE , 0 , len(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) return temp def __magic_name__ ( lowercase_ ) -> str: '''simple docstring''' UpperCamelCase = Heap() UpperCamelCase = [0] * len(lowercase_ ) UpperCamelCase = [-1] * len(lowercase_ ) # Neighboring Tree Vertex of selected vertex # Minimum Distance of explored vertex with neighboring vertex of partial tree # formed in graph UpperCamelCase = [] # Heap of Distance of vertices from their neighboring vertex UpperCamelCase = [] for vertex in range(len(lowercase_ ) ): distance_tv.append(sys.maxsize ) positions.append(lowercase_ ) heap.node_position.append(lowercase_ ) UpperCamelCase = [] UpperCamelCase = 1 UpperCamelCase = sys.maxsize for neighbor, distance in adjacency_list[0]: UpperCamelCase = 0 UpperCamelCase = distance heap.heapify(lowercase_ , lowercase_ ) for _ in range(1 , len(lowercase_ ) ): UpperCamelCase = heap.delete_minimum(lowercase_ , lowercase_ ) if visited[vertex] == 0: tree_edges.append((nbr_tv[vertex], vertex) ) UpperCamelCase = 1 for neighbor, distance in adjacency_list[vertex]: if ( visited[neighbor] == 0 and distance < distance_tv[heap.get_position(lowercase_ )] ): UpperCamelCase = distance heap.bottom_to_top( lowercase_ , heap.get_position(lowercase_ ) , lowercase_ , lowercase_ ) UpperCamelCase = vertex return tree_edges if __name__ == "__main__": # pragma: no cover # < --------- Prims Algorithm --------- > __a : Tuple = int(input("""Enter number of edges: """).strip()) __a : List[str] = defaultdict(list) for _ in range(edges_number): __a : int = [int(x) for x in input().strip().split()] adjacency_list[edge[0]].append([edge[1], edge[2]]) adjacency_list[edge[1]].append([edge[0], edge[2]]) print(prisms_algorithm(adjacency_list))
606
from collections.abc import Iterable from typing import Generic, TypeVar __a : Optional[Any] = TypeVar("""_T""") class __UpperCAmelCase ( Generic[_T] ): """simple docstring""" def __init__( self , SCREAMING_SNAKE_CASE = None ) -> None: """simple docstring""" UpperCamelCase = list(iterable or [] ) UpperCamelCase = [] def __len__( self ) -> int: """simple docstring""" return len(self._stacka ) + len(self._stacka ) def __repr__( self ) -> str: """simple docstring""" return f'''Queue({tuple(self._stacka[::-1] + self._stacka )})''' def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> None: """simple docstring""" self._stacka.append(SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> _T: """simple docstring""" UpperCamelCase = self._stacka.pop UpperCamelCase = self._stacka.append if not self._stacka: while self._stacka: stacka_append(stacka_pop() ) if not self._stacka: raise IndexError("Queue is empty" ) return self._stacka.pop() if __name__ == "__main__": from doctest import testmod testmod()
606
1
from __future__ import annotations from collections.abc import Callable SCREAMING_SNAKE_CASE : int = list[list[float | int]] def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> Matrix: _lowercase : int = len(lowerCamelCase_ ) _lowercase : Matrix = [[0 for _ in range(size + 1 )] for _ in range(lowerCamelCase_ )] _lowercase : int _lowercase : int _lowercase : int _lowercase : int _lowercase : int _lowercase : float for row in range(lowerCamelCase_ ): for col in range(lowerCamelCase_ ): _lowercase : Optional[Any] = matrix[row][col] _lowercase : Union[str, Any] = vector[row][0] _lowercase : List[Any] = 0 _lowercase : Tuple = 0 while row < size and col < size: # pivoting _lowercase : List[Any] = max((abs(augmented[rowa][col] ), rowa) for rowa in range(lowerCamelCase_ , lowerCamelCase_ ) )[ 1 ] if augmented[pivot_row][col] == 0: col += 1 continue else: _lowercase : Optional[int] = augmented[pivot_row], augmented[row] for rowa in range(row + 1 , lowerCamelCase_ ): _lowercase : Optional[Any] = augmented[rowa][col] / augmented[row][col] _lowercase : Optional[int] = 0 for cola in range(col + 1 , size + 1 ): augmented[rowa][cola] -= augmented[row][cola] * ratio row += 1 col += 1 # back substitution for col in range(1 , lowerCamelCase_ ): for row in range(lowerCamelCase_ ): _lowercase : Union[str, Any] = augmented[row][col] / augmented[col][col] for cola in range(lowerCamelCase_ , size + 1 ): augmented[row][cola] -= augmented[col][cola] * ratio # round to get rid of numbers like 2.000000000000004 return [ [round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(lowerCamelCase_ ) ] def UpperCamelCase_( lowerCamelCase_ ) -> Callable[[int], int]: _lowercase : int = len(lowerCamelCase_ ) _lowercase : Matrix = [[0 for _ in range(lowerCamelCase_ )] for _ in range(lowerCamelCase_ )] _lowercase : Matrix = [[0] for _ in range(lowerCamelCase_ )] _lowercase : Matrix _lowercase : int _lowercase : int _lowercase : int for x_val, y_val in enumerate(lowerCamelCase_ ): for col in range(lowerCamelCase_ ): _lowercase : Optional[int] = (x_val + 1) ** (size - col - 1) _lowercase : Dict = y_val _lowercase : Optional[Any] = solve(lowerCamelCase_ , lowerCamelCase_ ) def interpolated_func(lowerCamelCase_ ) -> int: return sum( round(coeffs[x_val][0] ) * (var ** (size - x_val - 1)) for x_val in range(lowerCamelCase_ ) ) return interpolated_func def UpperCamelCase_( lowerCamelCase_ ) -> int: return ( 1 - variable + variable**2 - variable**3 + variable**4 - variable**5 + variable**6 - variable**7 + variable**8 - variable**9 + variable**10 ) def UpperCamelCase_( lowerCamelCase_ = question_function , lowerCamelCase_ = 10 ) -> int: _lowercase : list[int] = [func(lowerCamelCase_ ) for x_val in range(1 , order + 1 )] _lowercase : list[Callable[[int], int]] = [ interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 ) ] _lowercase : int = 0 _lowercase : Callable[[int], int] _lowercase : int for poly in polynomials: _lowercase : str = 1 while func(lowerCamelCase_ ) == poly(lowerCamelCase_ ): x_val += 1 ret += poly(lowerCamelCase_ ) return ret if __name__ == "__main__": print(F"{solution() = }")
717
import unittest from transformers import DebertaVaConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DebertaVaForMaskedLM, DebertaVaForMultipleChoice, DebertaVaForQuestionAnswering, DebertaVaForSequenceClassification, DebertaVaForTokenClassification, DebertaVaModel, ) from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST class _lowerCamelCase( _a ): def __init__( self, lowerCamelCase, lowerCamelCase=13, lowerCamelCase=7, lowerCamelCase=True, lowerCamelCase=True, lowerCamelCase=True, lowerCamelCase=True, lowerCamelCase=99, lowerCamelCase=32, lowerCamelCase=5, lowerCamelCase=4, lowerCamelCase=37, lowerCamelCase="gelu", lowerCamelCase=0.1, lowerCamelCase=0.1, lowerCamelCase=5_12, lowerCamelCase=16, lowerCamelCase=2, lowerCamelCase=0.0_2, lowerCamelCase=False, lowerCamelCase=True, lowerCamelCase="None", lowerCamelCase=3, lowerCamelCase=4, lowerCamelCase=None, ) -> Optional[Any]: """simple docstring""" _lowercase : Union[str, Any] = parent _lowercase : Optional[Any] = batch_size _lowercase : Optional[Any] = seq_length _lowercase : Dict = is_training _lowercase : Optional[Any] = use_input_mask _lowercase : Optional[int] = use_token_type_ids _lowercase : str = use_labels _lowercase : List[Any] = vocab_size _lowercase : Dict = hidden_size _lowercase : Any = num_hidden_layers _lowercase : Union[str, Any] = num_attention_heads _lowercase : int = intermediate_size _lowercase : List[str] = hidden_act _lowercase : Tuple = hidden_dropout_prob _lowercase : Optional[Any] = attention_probs_dropout_prob _lowercase : int = max_position_embeddings _lowercase : Any = type_vocab_size _lowercase : Tuple = type_sequence_label_size _lowercase : List[Any] = initializer_range _lowercase : Optional[Any] = num_labels _lowercase : Tuple = num_choices _lowercase : Dict = relative_attention _lowercase : Optional[int] = position_biased_input _lowercase : str = pos_att_type _lowercase : Optional[Any] = scope def UpperCamelCase ( self) -> Optional[int]: """simple docstring""" _lowercase : int = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) _lowercase : Union[str, Any] = None if self.use_input_mask: _lowercase : int = ids_tensor([self.batch_size, self.seq_length], vocab_size=2) _lowercase : Tuple = None if self.use_token_type_ids: _lowercase : Tuple = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) _lowercase : Union[str, Any] = None _lowercase : Tuple = None _lowercase : str = None if self.use_labels: _lowercase : List[Any] = ids_tensor([self.batch_size], self.type_sequence_label_size) _lowercase : Optional[int] = ids_tensor([self.batch_size, self.seq_length], self.num_labels) _lowercase : str = ids_tensor([self.batch_size], self.num_choices) _lowercase : Union[str, Any] = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCamelCase ( self) -> Optional[int]: """simple docstring""" return DebertaVaConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, relative_attention=self.relative_attention, position_biased_input=self.position_biased_input, pos_att_type=self.pos_att_type, ) def UpperCamelCase ( self, lowerCamelCase) -> int: """simple docstring""" self.parent.assertListEqual(list(result.loss.size()), []) def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase) -> Union[str, Any]: """simple docstring""" _lowercase : List[str] = DebertaVaModel(config=lowerCamelCase) model.to(lowerCamelCase) model.eval() _lowercase : int = model(lowerCamelCase, attention_mask=lowerCamelCase, token_type_ids=lowerCamelCase)[0] _lowercase : Optional[int] = model(lowerCamelCase, token_type_ids=lowerCamelCase)[0] _lowercase : Dict = model(lowerCamelCase)[0] self.parent.assertListEqual(list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size]) def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase) -> List[Any]: """simple docstring""" _lowercase : List[Any] = DebertaVaForMaskedLM(config=lowerCamelCase) model.to(lowerCamelCase) model.eval() _lowercase : str = model(lowerCamelCase, attention_mask=lowerCamelCase, token_type_ids=lowerCamelCase, labels=lowerCamelCase) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase) -> List[str]: """simple docstring""" _lowercase : Optional[int] = self.num_labels _lowercase : Any = DebertaVaForSequenceClassification(lowerCamelCase) model.to(lowerCamelCase) model.eval() _lowercase : Any = model(lowerCamelCase, attention_mask=lowerCamelCase, token_type_ids=lowerCamelCase, labels=lowerCamelCase) self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels]) self.check_loss_output(lowerCamelCase) def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase) -> Optional[int]: """simple docstring""" _lowercase : Union[str, Any] = self.num_labels _lowercase : Optional[int] = DebertaVaForTokenClassification(config=lowerCamelCase) model.to(lowerCamelCase) model.eval() _lowercase : Optional[Any] = model(lowerCamelCase, attention_mask=lowerCamelCase, token_type_ids=lowerCamelCase, labels=lowerCamelCase) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase) -> Union[str, Any]: """simple docstring""" _lowercase : Union[str, Any] = DebertaVaForQuestionAnswering(config=lowerCamelCase) model.to(lowerCamelCase) model.eval() _lowercase : Union[str, Any] = model( lowerCamelCase, attention_mask=lowerCamelCase, token_type_ids=lowerCamelCase, start_positions=lowerCamelCase, end_positions=lowerCamelCase, ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase) -> Optional[int]: """simple docstring""" _lowercase : Any = DebertaVaForMultipleChoice(config=lowerCamelCase) model.to(lowerCamelCase) model.eval() _lowercase : Optional[int] = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() _lowercase : List[Any] = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() _lowercase : int = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() _lowercase : str = model( lowerCamelCase, attention_mask=lowerCamelCase, token_type_ids=lowerCamelCase, labels=lowerCamelCase, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices)) def UpperCamelCase ( self) -> Optional[Any]: """simple docstring""" _lowercase : Any = self.prepare_config_and_inputs() ( ( _lowercase ) , ( _lowercase ) , ( _lowercase ) , ( _lowercase ) , ( _lowercase ) , ( _lowercase ) , ( _lowercase ) , ) : List[str] = config_and_inputs _lowercase : int = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class _lowerCamelCase( _a, _a, unittest.TestCase ): lowercase_ : Any = ( ( DebertaVaModel, DebertaVaForMaskedLM, DebertaVaForSequenceClassification, DebertaVaForTokenClassification, DebertaVaForQuestionAnswering, DebertaVaForMultipleChoice, ) if is_torch_available() else () ) lowercase_ : Any = ( { """feature-extraction""": DebertaVaModel, """fill-mask""": DebertaVaForMaskedLM, """question-answering""": DebertaVaForQuestionAnswering, """text-classification""": DebertaVaForSequenceClassification, """token-classification""": DebertaVaForTokenClassification, """zero-shot""": DebertaVaForSequenceClassification, } if is_torch_available() else {} ) lowercase_ : int = True lowercase_ : str = False lowercase_ : str = False lowercase_ : str = False lowercase_ : List[Any] = False def UpperCamelCase ( self) -> str: """simple docstring""" _lowercase : List[Any] = DebertaVaModelTester(self) _lowercase : List[Any] = ConfigTester(self, config_class=lowerCamelCase, hidden_size=37) def UpperCamelCase ( self) -> Dict: """simple docstring""" self.config_tester.run_common_tests() def UpperCamelCase ( self) -> Union[str, Any]: """simple docstring""" _lowercase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_model(*lowerCamelCase) def UpperCamelCase ( self) -> Optional[Any]: """simple docstring""" _lowercase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_sequence_classification(*lowerCamelCase) def UpperCamelCase ( self) -> int: """simple docstring""" _lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_masked_lm(*lowerCamelCase) def UpperCamelCase ( self) -> List[str]: """simple docstring""" _lowercase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_question_answering(*lowerCamelCase) def UpperCamelCase ( self) -> Tuple: """simple docstring""" _lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_token_classification(*lowerCamelCase) def UpperCamelCase ( self) -> Optional[Any]: """simple docstring""" _lowercase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_multiple_choice(*lowerCamelCase) @slow def UpperCamelCase ( self) -> List[str]: """simple docstring""" for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowercase : Dict = DebertaVaModel.from_pretrained(lowerCamelCase) self.assertIsNotNone(lowerCamelCase) @require_torch @require_sentencepiece @require_tokenizers class _lowerCamelCase( unittest.TestCase ): @unittest.skip(reason='Model not available yet') def UpperCamelCase ( self) -> Tuple: """simple docstring""" pass @slow def UpperCamelCase ( self) -> Optional[int]: """simple docstring""" _lowercase : Dict = DebertaVaModel.from_pretrained('microsoft/deberta-v2-xlarge') _lowercase : str = torch.tensor([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]]) _lowercase : Any = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]) with torch.no_grad(): _lowercase : Tuple = model(lowerCamelCase, attention_mask=lowerCamelCase)[0] # compare the actual values for a slice. _lowercase : int = torch.tensor( [[[0.2_3_5_6, 0.1_9_4_8, 0.0_3_6_9], [-0.1_0_6_3, 0.3_5_8_6, -0.5_1_5_2], [-0.6_3_9_9, -0.0_2_5_9, -0.2_5_2_5]]]) self.assertTrue(torch.allclose(output[:, 1:4, 1:4], lowerCamelCase, atol=1E-4), F'''{output[:, 1:4, 1:4]}''')
354
0
import tempfile import torch from diffusers import PNDMScheduler from .test_schedulers import SchedulerCommonTest class SCREAMING_SNAKE_CASE__ ( lowercase__ ): snake_case__ : Dict = (PNDMScheduler,) snake_case__ : Optional[int] = (('''num_inference_steps''', 50),) def SCREAMING_SNAKE_CASE ( self : List[str] , **SCREAMING_SNAKE_CASE__ : Tuple ) -> Dict: a_ : Dict = { 'num_train_timesteps': 1_0_0_0, 'beta_start': 0.0001, 'beta_end': 0.02, 'beta_schedule': 'linear', } config.update(**SCREAMING_SNAKE_CASE__ ) return config def SCREAMING_SNAKE_CASE ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Dict=0 , **SCREAMING_SNAKE_CASE__ : List[str] ) -> int: a_ : Tuple = dict(self.forward_default_kwargs ) a_ : Any = kwargs.pop('num_inference_steps' , SCREAMING_SNAKE_CASE__ ) a_ : Any = self.dummy_sample a_ : str = 0.1 * sample a_ : Tuple = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: a_ : Any = self.get_scheduler_config(**SCREAMING_SNAKE_CASE__ ) a_ : List[str] = scheduler_class(**SCREAMING_SNAKE_CASE__ ) scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ ) # copy over dummy past residuals a_ : int = dummy_past_residuals[:] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(SCREAMING_SNAKE_CASE__ ) a_ : Any = scheduler_class.from_pretrained(SCREAMING_SNAKE_CASE__ ) new_scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ ) # copy over dummy past residuals a_ : Optional[int] = dummy_past_residuals[:] a_ : List[str] = scheduler.step_prk(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ).prev_sample a_ : List[str] = new_scheduler.step_prk(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" a_ : Optional[int] = scheduler.step_plms(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ).prev_sample a_ : List[str] = new_scheduler.step_plms(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]: pass def SCREAMING_SNAKE_CASE ( self : List[str] , SCREAMING_SNAKE_CASE__ : List[Any]=0 , **SCREAMING_SNAKE_CASE__ : Tuple ) -> Union[str, Any]: a_ : Optional[int] = dict(self.forward_default_kwargs ) a_ : Optional[Any] = kwargs.pop('num_inference_steps' , SCREAMING_SNAKE_CASE__ ) a_ : Optional[Any] = self.dummy_sample a_ : str = 0.1 * sample a_ : Tuple = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: a_ : Dict = self.get_scheduler_config() a_ : Tuple = scheduler_class(**SCREAMING_SNAKE_CASE__ ) scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ ) # copy over dummy past residuals (must be after setting timesteps) a_ : str = dummy_past_residuals[:] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(SCREAMING_SNAKE_CASE__ ) a_ : Optional[int] = scheduler_class.from_pretrained(SCREAMING_SNAKE_CASE__ ) # copy over dummy past residuals new_scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ ) # copy over dummy past residual (must be after setting timesteps) a_ : Tuple = dummy_past_residuals[:] a_ : Tuple = scheduler.step_prk(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ).prev_sample a_ : Tuple = new_scheduler.step_prk(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" a_ : List[str] = scheduler.step_plms(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ).prev_sample a_ : Tuple = new_scheduler.step_plms(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def SCREAMING_SNAKE_CASE ( self : Dict , **SCREAMING_SNAKE_CASE__ : Dict ) -> Union[str, Any]: a_ : Optional[int] = self.scheduler_classes[0] a_ : str = self.get_scheduler_config(**SCREAMING_SNAKE_CASE__ ) a_ : Tuple = scheduler_class(**SCREAMING_SNAKE_CASE__ ) a_ : Dict = 1_0 a_ : List[Any] = self.dummy_model() a_ : List[Any] = self.dummy_sample_deter scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ ) for i, t in enumerate(scheduler.prk_timesteps ): a_ : List[Any] = model(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) a_ : Dict = scheduler.step_prk(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).prev_sample for i, t in enumerate(scheduler.plms_timesteps ): a_ : Tuple = model(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) a_ : Union[str, Any] = scheduler.step_plms(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).prev_sample return sample def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Dict: a_ : Tuple = dict(self.forward_default_kwargs ) a_ : Optional[int] = kwargs.pop('num_inference_steps' , SCREAMING_SNAKE_CASE__ ) for scheduler_class in self.scheduler_classes: a_ : List[str] = self.get_scheduler_config() a_ : Optional[Any] = scheduler_class(**SCREAMING_SNAKE_CASE__ ) a_ : List[str] = self.dummy_sample a_ : Optional[int] = 0.1 * sample if num_inference_steps is not None and hasattr(SCREAMING_SNAKE_CASE__ , 'set_timesteps' ): scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ ) elif num_inference_steps is not None and not hasattr(SCREAMING_SNAKE_CASE__ , 'set_timesteps' ): a_ : Any = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) a_ : List[str] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] a_ : List[Any] = dummy_past_residuals[:] a_ : Union[str, Any] = scheduler.step_prk(SCREAMING_SNAKE_CASE__ , 0 , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ).prev_sample a_ : Dict = scheduler.step_prk(SCREAMING_SNAKE_CASE__ , 1 , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) a_ : Union[str, Any] = scheduler.step_plms(SCREAMING_SNAKE_CASE__ , 0 , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ).prev_sample a_ : Union[str, Any] = scheduler.step_plms(SCREAMING_SNAKE_CASE__ , 1 , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def SCREAMING_SNAKE_CASE ( self : int ) -> Dict: for timesteps in [1_0_0, 1_0_0_0]: self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[int]: for steps_offset in [0, 1]: self.check_over_configs(steps_offset=SCREAMING_SNAKE_CASE__ ) a_ : Optional[Any] = self.scheduler_classes[0] a_ : Tuple = self.get_scheduler_config(steps_offset=1 ) a_ : str = scheduler_class(**SCREAMING_SNAKE_CASE__ ) scheduler.set_timesteps(1_0 ) assert torch.equal( scheduler.timesteps , torch.LongTensor( [9_0_1, 8_5_1, 8_5_1, 8_0_1, 8_0_1, 7_5_1, 7_5_1, 7_0_1, 7_0_1, 6_5_1, 6_5_1, 6_0_1, 6_0_1, 5_0_1, 4_0_1, 3_0_1, 2_0_1, 1_0_1, 1] ) , ) def SCREAMING_SNAKE_CASE ( self : str ) -> int: for beta_start, beta_end in zip([0.0001, 0.001] , [0.002, 0.02] ): self.check_over_configs(beta_start=SCREAMING_SNAKE_CASE__ , beta_end=SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]: for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]: for t in [1, 5, 1_0]: self.check_over_forward(time_step=SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[int]: for t, num_inference_steps in zip([1, 5, 1_0] , [1_0, 5_0, 1_0_0] ): self.check_over_forward(num_inference_steps=SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : int ) -> Any: # earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3 a_ : Union[str, Any] = 2_7 for scheduler_class in self.scheduler_classes: a_ : List[str] = self.dummy_sample a_ : Tuple = 0.1 * sample a_ : Any = self.get_scheduler_config() a_ : str = scheduler_class(**SCREAMING_SNAKE_CASE__ ) scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ ) # before power of 3 fix, would error on first step, so we only need to do two for i, t in enumerate(scheduler.prk_timesteps[:2] ): a_ : str = scheduler.step_prk(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).prev_sample def SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]: with self.assertRaises(SCREAMING_SNAKE_CASE__ ): a_ : Union[str, Any] = self.scheduler_classes[0] a_ : Union[str, Any] = self.get_scheduler_config() a_ : Union[str, Any] = scheduler_class(**SCREAMING_SNAKE_CASE__ ) scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample def SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]: a_ : List[Any] = self.full_loop() a_ : Optional[int] = torch.sum(torch.abs(SCREAMING_SNAKE_CASE__ ) ) a_ : Optional[Any] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) ) assert abs(result_sum.item() - 198.1318 ) < 1E-2 assert abs(result_mean.item() - 0.2580 ) < 1E-3 def SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]: a_ : Dict = self.full_loop(prediction_type='v_prediction' ) a_ : Optional[Any] = torch.sum(torch.abs(SCREAMING_SNAKE_CASE__ ) ) a_ : Dict = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) ) assert abs(result_sum.item() - 67.3986 ) < 1E-2 assert abs(result_mean.item() - 0.0878 ) < 1E-3 def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any: # We specify different beta, so that the first alpha is 0.99 a_ : List[Any] = self.full_loop(set_alpha_to_one=SCREAMING_SNAKE_CASE__ , beta_start=0.01 ) a_ : Tuple = torch.sum(torch.abs(SCREAMING_SNAKE_CASE__ ) ) a_ : Any = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) ) assert abs(result_sum.item() - 230.0399 ) < 1E-2 assert abs(result_mean.item() - 0.2995 ) < 1E-3 def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Union[str, Any]: # We specify different beta, so that the first alpha is 0.99 a_ : Tuple = self.full_loop(set_alpha_to_one=SCREAMING_SNAKE_CASE__ , beta_start=0.01 ) a_ : Optional[Any] = torch.sum(torch.abs(SCREAMING_SNAKE_CASE__ ) ) a_ : List[Any] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) ) assert abs(result_sum.item() - 186.9482 ) < 1E-2 assert abs(result_mean.item() - 0.2434 ) < 1E-3
570
import os from shutil import copyfile from typing import List, Optional, Tuple from tokenizers import processors from ...tokenization_utils import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_nllb import NllbTokenizer else: UpperCAmelCase_ : Optional[int] = None UpperCAmelCase_ : Any = logging.get_logger(__name__) UpperCAmelCase_ : Optional[int] = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'} UpperCAmelCase_ : Tuple = { 'vocab_file': { 'facebook/nllb-200-distilled-600M': ( 'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model' ), }, 'tokenizer_file': { 'facebook/nllb-200-distilled-600M': ( 'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json' ), }, } UpperCAmelCase_ : str = { 'facebook/nllb-large-en-ro': 1024, 'facebook/nllb-200-distilled-600M': 1024, } # fmt: off UpperCAmelCase_ : Any = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn'] class SCREAMING_SNAKE_CASE__ ( lowercase__ ): snake_case__ : Dict = VOCAB_FILES_NAMES snake_case__ : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case__ : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP snake_case__ : Any = ['''input_ids''', '''attention_mask'''] snake_case__ : str = NllbTokenizer snake_case__ : List[int] = [] snake_case__ : List[int] = [] def __init__( self : List[Any] , SCREAMING_SNAKE_CASE__ : List[Any]=None , SCREAMING_SNAKE_CASE__ : str=None , SCREAMING_SNAKE_CASE__ : Optional[Any]="<s>" , SCREAMING_SNAKE_CASE__ : Any="</s>" , SCREAMING_SNAKE_CASE__ : str="</s>" , SCREAMING_SNAKE_CASE__ : int="<s>" , SCREAMING_SNAKE_CASE__ : List[Any]="<unk>" , SCREAMING_SNAKE_CASE__ : List[str]="<pad>" , SCREAMING_SNAKE_CASE__ : Optional[int]="<mask>" , SCREAMING_SNAKE_CASE__ : Any=None , SCREAMING_SNAKE_CASE__ : str=None , SCREAMING_SNAKE_CASE__ : Optional[int]=None , SCREAMING_SNAKE_CASE__ : int=False , **SCREAMING_SNAKE_CASE__ : Tuple , ) -> Union[str, Any]: # Mask token behave like a normal word, i.e. include the space before it a_ : Union[str, Any] = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else mask_token a_ : Union[str, Any] = legacy_behaviour super().__init__( vocab_file=SCREAMING_SNAKE_CASE__ , tokenizer_file=SCREAMING_SNAKE_CASE__ , bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , mask_token=SCREAMING_SNAKE_CASE__ , src_lang=SCREAMING_SNAKE_CASE__ , tgt_lang=SCREAMING_SNAKE_CASE__ , additional_special_tokens=SCREAMING_SNAKE_CASE__ , legacy_behaviour=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , ) a_ : Optional[Any] = vocab_file a_ : str = False if not self.vocab_file else True a_ : Union[str, Any] = FAIRSEQ_LANGUAGE_CODES.copy() if additional_special_tokens is not None: # Only add those special tokens if they are not already there. _additional_special_tokens.extend( [t for t in additional_special_tokens if t not in _additional_special_tokens] ) self.add_special_tokens({'additional_special_tokens': _additional_special_tokens} ) a_ : Optional[int] = { lang_code: self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) for lang_code in FAIRSEQ_LANGUAGE_CODES } a_ : Tuple = src_lang if src_lang is not None else 'eng_Latn' a_ : Any = self.convert_tokens_to_ids(self._src_lang ) a_ : Dict = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) @property def SCREAMING_SNAKE_CASE ( self : Dict ) -> str: return self._src_lang @src_lang.setter def SCREAMING_SNAKE_CASE ( self : List[str] , SCREAMING_SNAKE_CASE__ : str ) -> None: a_ : Optional[int] = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def SCREAMING_SNAKE_CASE ( self : List[Any] , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ) -> List[int]: if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def SCREAMING_SNAKE_CASE ( self : List[Any] , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ) -> List[int]: a_ : Dict = [self.sep_token_id] a_ : Union[str, Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[str] , SCREAMING_SNAKE_CASE__ : Optional[str] , **SCREAMING_SNAKE_CASE__ : Tuple ) -> List[str]: if src_lang is None or tgt_lang is None: raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' ) a_ : Any = src_lang a_ : Optional[Any] = self(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) a_ : Tuple = self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) a_ : int = tgt_lang_id return inputs def SCREAMING_SNAKE_CASE ( self : List[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : str = "eng_Latn" , SCREAMING_SNAKE_CASE__ : Optional[List[str]] = None , SCREAMING_SNAKE_CASE__ : str = "fra_Latn" , **SCREAMING_SNAKE_CASE__ : List[Any] , ) -> BatchEncoding: a_ : Union[str, Any] = src_lang a_ : int = tgt_lang return super().prepare_seqaseq_batch(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int: return self.set_src_lang_special_tokens(self.src_lang ) def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict: return self.set_tgt_lang_special_tokens(self.tgt_lang ) def SCREAMING_SNAKE_CASE ( self : Tuple , SCREAMING_SNAKE_CASE__ : Dict ) -> None: a_ : str = self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) if self.legacy_behaviour: a_ : Dict = [] a_ : Dict = [self.eos_token_id, self.cur_lang_code] else: a_ : Union[str, Any] = [self.cur_lang_code] a_ : List[str] = [self.eos_token_id] a_ : Tuple = self.convert_ids_to_tokens(self.prefix_tokens ) a_ : List[str] = self.convert_ids_to_tokens(self.suffix_tokens ) a_ : Optional[Any] = processors.TemplateProcessing( single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def SCREAMING_SNAKE_CASE ( self : List[Any] , SCREAMING_SNAKE_CASE__ : str ) -> None: a_ : Any = self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) if self.legacy_behaviour: a_ : Optional[Any] = [] a_ : Any = [self.eos_token_id, self.cur_lang_code] else: a_ : str = [self.cur_lang_code] a_ : str = [self.eos_token_id] a_ : str = self.convert_ids_to_tokens(self.prefix_tokens ) a_ : Any = self.convert_ids_to_tokens(self.suffix_tokens ) a_ : List[Any] = processors.TemplateProcessing( single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def SCREAMING_SNAKE_CASE ( self : List[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[str] = None ) -> Tuple[str]: if not self.can_save_slow_tokenizer: raise ValueError( 'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ' 'tokenizer.' ) if not os.path.isdir(SCREAMING_SNAKE_CASE__ ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory.""" ) return a_ : Optional[Any] = os.path.join( SCREAMING_SNAKE_CASE__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE__ ): copyfile(self.vocab_file , SCREAMING_SNAKE_CASE__ ) return (out_vocab_file,)
570
1
"""simple docstring""" import itertools import json import os import unittest from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class lowercase__ ( UpperCAmelCase_, unittest.TestCase ): '''simple docstring''' _UpperCAmelCase = RobertaTokenizer _UpperCAmelCase = RobertaTokenizerFast _UpperCAmelCase = True _UpperCAmelCase = {"cls_token": "<s>"} def lowerCamelCase_ ( self ) -> Union[str, Any]: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt _UpperCAmelCase = [ 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', '\u0120', '\u0120l', '\u0120n', '\u0120lo', '\u0120low', 'er', '\u0120lowest', '\u0120newer', '\u0120wider', '<unk>', ] _UpperCAmelCase = dict(zip(_snake_case , range(len(_snake_case ) ) ) ) _UpperCAmelCase = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', ''] _UpperCAmelCase = {'unk_token': '<unk>'} _UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) _UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp: fp.write(json.dumps(_snake_case ) + '\n' ) with open(self.merges_file , 'w' , encoding='utf-8' ) as fp: fp.write('\n'.join(_snake_case ) ) def lowerCamelCase_ ( self , **snake_case ) -> Any: kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **_snake_case ) def lowerCamelCase_ ( self , **snake_case ) -> Optional[Any]: kwargs.update(self.special_tokens_map ) return RobertaTokenizerFast.from_pretrained(self.tmpdirname , **_snake_case ) def lowerCamelCase_ ( self , snake_case ) -> Optional[Any]: _UpperCAmelCase = 'lower newer' _UpperCAmelCase = 'lower newer' return input_text, output_text def lowerCamelCase_ ( self ) -> Dict: _UpperCAmelCase = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map ) _UpperCAmelCase = 'lower newer' _UpperCAmelCase = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er'] _UpperCAmelCase = tokenizer.tokenize(_snake_case ) # , add_prefix_space=True) self.assertListEqual(_snake_case , _snake_case ) _UpperCAmelCase = tokens + [tokenizer.unk_token] _UpperCAmelCase = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(_snake_case ) , _snake_case ) def lowerCamelCase_ ( self ) -> Optional[int]: _UpperCAmelCase = self.get_tokenizer() self.assertListEqual(tokenizer.encode('Hello world!' , add_special_tokens=_snake_case ) , [0, 31414, 232, 328, 2] ) self.assertListEqual( tokenizer.encode('Hello world! cécé herlolip 418' , add_special_tokens=_snake_case ) , [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2] , ) @slow def lowerCamelCase_ ( self ) -> Optional[Any]: _UpperCAmelCase = self.tokenizer_class.from_pretrained('roberta-base' ) _UpperCAmelCase = tokenizer.encode('sequence builders' , add_special_tokens=_snake_case ) _UpperCAmelCase = tokenizer.encode('multi-sequence build' , add_special_tokens=_snake_case ) _UpperCAmelCase = tokenizer.encode( 'sequence builders' , add_special_tokens=_snake_case , add_prefix_space=_snake_case ) _UpperCAmelCase = tokenizer.encode( 'sequence builders' , 'multi-sequence build' , add_special_tokens=_snake_case , add_prefix_space=_snake_case ) _UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(_snake_case ) _UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(_snake_case , _snake_case ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode def lowerCamelCase_ ( self ) -> Union[str, Any]: _UpperCAmelCase = self.get_tokenizer() _UpperCAmelCase = 'Encode this sequence.' _UpperCAmelCase = tokenizer.byte_encoder[' '.encode('utf-8' )[0]] # Testing encoder arguments _UpperCAmelCase = tokenizer.encode(_snake_case , add_special_tokens=_snake_case , add_prefix_space=_snake_case ) _UpperCAmelCase = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertNotEqual(_snake_case , _snake_case ) _UpperCAmelCase = tokenizer.encode(_snake_case , add_special_tokens=_snake_case , add_prefix_space=_snake_case ) _UpperCAmelCase = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertEqual(_snake_case , _snake_case ) tokenizer.add_special_tokens({'bos_token': '<s>'} ) _UpperCAmelCase = tokenizer.encode(_snake_case , add_special_tokens=_snake_case ) _UpperCAmelCase = tokenizer.convert_ids_to_tokens(encoded[1] )[0] self.assertNotEqual(_snake_case , _snake_case ) # Testing spaces after special tokens _UpperCAmelCase = '<mask>' tokenizer.add_special_tokens( {'mask_token': AddedToken(_snake_case , lstrip=_snake_case , rstrip=_snake_case )} ) # mask token has a left space _UpperCAmelCase = tokenizer.convert_tokens_to_ids(_snake_case ) _UpperCAmelCase = 'Encode <mask> sequence' _UpperCAmelCase = 'Encode <mask>sequence' _UpperCAmelCase = tokenizer.encode(_snake_case ) _UpperCAmelCase = encoded.index(_snake_case ) _UpperCAmelCase = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertEqual(_snake_case , _snake_case ) _UpperCAmelCase = tokenizer.encode(_snake_case ) _UpperCAmelCase = encoded.index(_snake_case ) _UpperCAmelCase = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertNotEqual(_snake_case , _snake_case ) def lowerCamelCase_ ( self ) -> Union[str, Any]: pass def lowerCamelCase_ ( self ) -> Tuple: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ): _UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(_snake_case , **_snake_case ) _UpperCAmelCase = self.tokenizer_class.from_pretrained(_snake_case , **_snake_case ) _UpperCAmelCase = 'A, <mask> AllenNLP sentence.' _UpperCAmelCase = tokenizer_r.encode_plus(_snake_case , add_special_tokens=_snake_case , return_token_type_ids=_snake_case ) _UpperCAmelCase = tokenizer_p.encode_plus(_snake_case , add_special_tokens=_snake_case , return_token_type_ids=_snake_case ) # token_type_ids should put 0 everywhere self.assertEqual(sum(tokens_r['token_type_ids'] ) , sum(tokens_p['token_type_ids'] ) ) # attention_mask should put 1 everywhere, so sum over length should be 1 self.assertEqual( sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) , sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) , ) _UpperCAmelCase = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] ) _UpperCAmelCase = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] ) # Rust correctly handles the space before the mask while python doesnt self.assertSequenceEqual(tokens_p['input_ids'] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] ) self.assertSequenceEqual(tokens_r['input_ids'] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] ) self.assertSequenceEqual( _snake_case , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] ) self.assertSequenceEqual( _snake_case , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] ) def lowerCamelCase_ ( self ) -> int: for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ): _UpperCAmelCase = self.rust_tokenizer_class.from_pretrained( self.tmpdirname , use_fast=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case ) _UpperCAmelCase = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() ) _UpperCAmelCase = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() ) self.assertEqual(pre_tokenizer_state['add_prefix_space'] , _snake_case ) self.assertEqual(post_processor_state['add_prefix_space'] , _snake_case ) self.assertEqual(post_processor_state['trim_offsets'] , _snake_case ) def lowerCamelCase_ ( self ) -> List[str]: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ): _UpperCAmelCase = 'hello' # `hello` is a token in the vocabulary of `pretrained_name` _UpperCAmelCase = f'{text_of_1_token} {text_of_1_token}' _UpperCAmelCase = self.rust_tokenizer_class.from_pretrained( _snake_case , use_fast=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case ) _UpperCAmelCase = tokenizer_r(_snake_case , return_offsets_mapping=_snake_case , add_special_tokens=_snake_case ) self.assertEqual(encoding.offset_mapping[0] , (0, len(_snake_case )) ) self.assertEqual( encoding.offset_mapping[1] , (len(_snake_case ) + 1, len(_snake_case ) + 1 + len(_snake_case )) , ) _UpperCAmelCase = self.rust_tokenizer_class.from_pretrained( _snake_case , use_fast=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case ) _UpperCAmelCase = tokenizer_r(_snake_case , return_offsets_mapping=_snake_case , add_special_tokens=_snake_case ) self.assertEqual(encoding.offset_mapping[0] , (0, len(_snake_case )) ) self.assertEqual( encoding.offset_mapping[1] , (len(_snake_case ) + 1, len(_snake_case ) + 1 + len(_snake_case )) , ) _UpperCAmelCase = self.rust_tokenizer_class.from_pretrained( _snake_case , use_fast=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case ) _UpperCAmelCase = tokenizer_r(_snake_case , return_offsets_mapping=_snake_case , add_special_tokens=_snake_case ) self.assertEqual(encoding.offset_mapping[0] , (0, len(_snake_case )) ) self.assertEqual( encoding.offset_mapping[1] , (len(_snake_case ), len(_snake_case ) + 1 + len(_snake_case )) , ) _UpperCAmelCase = self.rust_tokenizer_class.from_pretrained( _snake_case , use_fast=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case ) _UpperCAmelCase = tokenizer_r(_snake_case , return_offsets_mapping=_snake_case , add_special_tokens=_snake_case ) self.assertEqual(encoding.offset_mapping[0] , (0, len(_snake_case )) ) self.assertEqual( encoding.offset_mapping[1] , (len(_snake_case ), len(_snake_case ) + 1 + len(_snake_case )) , ) _UpperCAmelCase = f' {text}' # tokenizer_r = self.rust_tokenizer_class.from_pretrained( # pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True # ) # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token))) # self.assertEqual( # encoding.offset_mapping[1], # (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), # ) _UpperCAmelCase = self.rust_tokenizer_class.from_pretrained( _snake_case , use_fast=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case ) _UpperCAmelCase = tokenizer_r(_snake_case , return_offsets_mapping=_snake_case , add_special_tokens=_snake_case ) self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(_snake_case )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(_snake_case ) + 1, 1 + len(_snake_case ) + 1 + len(_snake_case )) , ) _UpperCAmelCase = self.rust_tokenizer_class.from_pretrained( _snake_case , use_fast=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case ) _UpperCAmelCase = tokenizer_r(_snake_case , return_offsets_mapping=_snake_case , add_special_tokens=_snake_case ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(_snake_case )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(_snake_case ), 1 + len(_snake_case ) + 1 + len(_snake_case )) , ) _UpperCAmelCase = self.rust_tokenizer_class.from_pretrained( _snake_case , use_fast=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case ) _UpperCAmelCase = tokenizer_r(_snake_case , return_offsets_mapping=_snake_case , add_special_tokens=_snake_case ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(_snake_case )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(_snake_case ), 1 + len(_snake_case ) + 1 + len(_snake_case )) , )
707
"""simple docstring""" import inspect import unittest from math import floor from transformers import CvtConfig from transformers.file_utils import cached_property, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import CvtForImageClassification, CvtModel from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class lowercase__ ( A ): '''simple docstring''' def lowerCamelCase_ ( self ) -> int: _UpperCAmelCase = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(snake_case , 'embed_dim' ) ) self.parent.assertTrue(hasattr(snake_case , 'num_heads' ) ) class lowercase__ : '''simple docstring''' def __init__( self , snake_case , snake_case=13 , snake_case=64 , snake_case=3 , snake_case=[16, 48, 96] , snake_case=[1, 3, 6] , snake_case=[1, 2, 10] , snake_case=[7, 3, 3] , snake_case=[4, 2, 2] , snake_case=[2, 1, 1] , snake_case=[2, 2, 2] , snake_case=[False, False, True] , snake_case=[0.0, 0.0, 0.0] , snake_case=0.02 , snake_case=1E-12 , snake_case=True , snake_case=True , snake_case=2 , ) -> Tuple: _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = image_size _UpperCAmelCase = patch_sizes _UpperCAmelCase = patch_stride _UpperCAmelCase = patch_padding _UpperCAmelCase = is_training _UpperCAmelCase = use_labels _UpperCAmelCase = num_labels _UpperCAmelCase = num_channels _UpperCAmelCase = embed_dim _UpperCAmelCase = num_heads _UpperCAmelCase = stride_kv _UpperCAmelCase = depth _UpperCAmelCase = cls_token _UpperCAmelCase = attention_drop_rate _UpperCAmelCase = initializer_range _UpperCAmelCase = layer_norm_eps def lowerCamelCase_ ( self ) -> Dict: _UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _UpperCAmelCase = None if self.use_labels: _UpperCAmelCase = ids_tensor([self.batch_size] , self.num_labels ) _UpperCAmelCase = self.get_config() return config, pixel_values, labels def lowerCamelCase_ ( self ) -> List[str]: return CvtConfig( image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , ) def lowerCamelCase_ ( self , snake_case , snake_case , snake_case ) -> Optional[int]: _UpperCAmelCase = CvtModel(config=snake_case ) model.to(snake_case ) model.eval() _UpperCAmelCase = model(snake_case ) _UpperCAmelCase = (self.image_size, self.image_size) _UpperCAmelCase , _UpperCAmelCase = image_size[0], image_size[1] for i in range(len(self.depth ) ): _UpperCAmelCase = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 ) _UpperCAmelCase = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) ) def lowerCamelCase_ ( self , snake_case , snake_case , snake_case ) -> Optional[Any]: _UpperCAmelCase = self.num_labels _UpperCAmelCase = CvtForImageClassification(snake_case ) model.to(snake_case ) model.eval() _UpperCAmelCase = model(snake_case , labels=snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase_ ( self ) -> Union[str, Any]: _UpperCAmelCase = self.prepare_config_and_inputs() _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs _UpperCAmelCase = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class lowercase__ ( A, A, unittest.TestCase ): '''simple docstring''' _UpperCAmelCase = (CvtModel, CvtForImageClassification) if is_torch_available() else () _UpperCAmelCase = ( {'''feature-extraction''': CvtModel, '''image-classification''': CvtForImageClassification} if is_torch_available() else {} ) _UpperCAmelCase = False _UpperCAmelCase = False _UpperCAmelCase = False _UpperCAmelCase = False _UpperCAmelCase = False def lowerCamelCase_ ( self ) -> Union[str, Any]: _UpperCAmelCase = CvtModelTester(self ) _UpperCAmelCase = ConfigTester(self , config_class=snake_case , has_text_modality=snake_case , hidden_size=37 ) def lowerCamelCase_ ( self ) -> Union[str, Any]: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def lowerCamelCase_ ( self ) -> Union[str, Any]: return @unittest.skip(reason='Cvt does not output attentions' ) def lowerCamelCase_ ( self ) -> str: pass @unittest.skip(reason='Cvt does not use inputs_embeds' ) def lowerCamelCase_ ( self ) -> int: pass @unittest.skip(reason='Cvt does not support input and output embeddings' ) def lowerCamelCase_ ( self ) -> Union[str, Any]: pass def lowerCamelCase_ ( self ) -> Any: _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCAmelCase = model_class(snake_case ) _UpperCAmelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _UpperCAmelCase = [*signature.parameters.keys()] _UpperCAmelCase = ['pixel_values'] self.assertListEqual(arg_names[:1] , snake_case ) def lowerCamelCase_ ( self ) -> Optional[int]: _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case ) def lowerCamelCase_ ( self ) -> Optional[int]: def check_hidden_states_output(snake_case , snake_case , snake_case ): _UpperCAmelCase = model_class(snake_case ) model.to(snake_case ) model.eval() with torch.no_grad(): _UpperCAmelCase = model(**self._prepare_for_class(snake_case , snake_case ) ) _UpperCAmelCase = outputs.hidden_states _UpperCAmelCase = len(self.model_tester.depth ) self.assertEqual(len(snake_case ) , snake_case ) # verify the first hidden states (first block) self.assertListEqual( list(hidden_states[0].shape[-3:] ) , [ self.model_tester.embed_dim[0], self.model_tester.image_size // 4, self.model_tester.image_size // 4, ] , ) _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCAmelCase = True check_hidden_states_output(snake_case , snake_case , snake_case ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _UpperCAmelCase = True check_hidden_states_output(snake_case , snake_case , snake_case ) def lowerCamelCase_ ( self ) -> Any: _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*snake_case ) @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' ) def lowerCamelCase_ ( self ) -> Dict: pass @slow def lowerCamelCase_ ( self ) -> Dict: for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _UpperCAmelCase = CvtModel.from_pretrained(snake_case ) self.assertIsNotNone(snake_case ) def UpperCAmelCase ( ): '''simple docstring''' _UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class lowercase__ ( unittest.TestCase ): '''simple docstring''' @cached_property def lowerCamelCase_ ( self ) -> List[Any]: return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) @slow def lowerCamelCase_ ( self ) -> Dict: _UpperCAmelCase = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(snake_case ) _UpperCAmelCase = self.default_image_processor _UpperCAmelCase = prepare_img() _UpperCAmelCase = image_processor(images=snake_case , return_tensors='pt' ).to(snake_case ) # forward pass with torch.no_grad(): _UpperCAmelCase = model(**snake_case ) # verify the logits _UpperCAmelCase = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , snake_case ) _UpperCAmelCase = torch.tensor([0.9285, 0.9015, -0.3150] ).to(snake_case ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case , atol=1E-4 ) )
24
0
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os import platform import numpy as np import psutil import torch from accelerate import __version__ as version from accelerate.commands.config import default_config_file, load_config_from_file from ..utils import is_npu_available, is_xpu_available def a ( A__=None ) -> Union[str, Any]: '''simple docstring''' if subparsers is not None: SCREAMING_SNAKE_CASE__ : int = subparsers.add_parser('''env''' ) else: SCREAMING_SNAKE_CASE__ : List[Any] = argparse.ArgumentParser('''Accelerate env command''' ) parser.add_argument( '''--config_file''' , default=A__ , help='''The config file to use for the default values in the launching script.''' ) if subparsers is not None: parser.set_defaults(func=A__ ) return parser def a ( A__ ) -> int: '''simple docstring''' SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.__version__ SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.cuda.is_available() SCREAMING_SNAKE_CASE__ : Union[str, Any] = is_xpu_available() SCREAMING_SNAKE_CASE__ : List[Any] = is_npu_available() SCREAMING_SNAKE_CASE__ : Tuple = '''Not found''' # Get the default from the config file. if args.config_file is not None or os.path.isfile(A__ ): SCREAMING_SNAKE_CASE__ : List[str] = load_config_from_file(args.config_file ).to_dict() SCREAMING_SNAKE_CASE__ : int = { '''`Accelerate` version''': version, '''Platform''': platform.platform(), '''Python version''': platform.python_version(), '''Numpy version''': np.__version__, '''PyTorch version (GPU?)''': f"""{pt_version} ({pt_cuda_available})""", '''PyTorch XPU available''': str(A__ ), '''PyTorch NPU available''': str(A__ ), '''System RAM''': f"""{psutil.virtual_memory().total / 1_0_2_4 ** 3:.2f} GB""", } if pt_cuda_available: SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.cuda.get_device_name() print('''\nCopy-and-paste the text below in your GitHub issue\n''' ) print('''\n'''.join([f"""- {prop}: {val}""" for prop, val in info.items()] ) ) print('''- `Accelerate` default config:''' if args.config_file is None else '''- `Accelerate` config passed:''' ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = ( '''\n'''.join([f"""\t- {prop}: {val}""" for prop, val in accelerate_config.items()] ) if isinstance(A__ , A__ ) else f"""\t{accelerate_config}""" ) print(A__ ) SCREAMING_SNAKE_CASE__ : Optional[int] = accelerate_config return info def a ( ) -> int: '''simple docstring''' SCREAMING_SNAKE_CASE__ : List[str] = env_command_parser() SCREAMING_SNAKE_CASE__ : List[str] = parser.parse_args() env_command(A__ ) return 0 if __name__ == "__main__": raise SystemExit(main())
35
import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import AutoImageProcessor, SwinvaConfig, SwinvaForImageClassification def lowerCAmelCase( SCREAMING_SNAKE_CASE_ )-> Optional[int]: """simple docstring""" UpperCamelCase_ = SwinvaConfig() UpperCamelCase_ = swinva_name.split("_" ) UpperCamelCase_ = name_split[1] if "to" in name_split[3]: UpperCamelCase_ = int(name_split[3][-3:] ) else: UpperCamelCase_ = int(name_split[3] ) if "to" in name_split[2]: UpperCamelCase_ = int(name_split[2][-2:] ) else: UpperCamelCase_ = int(name_split[2][6:] ) if model_size == "tiny": UpperCamelCase_ = 9_6 UpperCamelCase_ = (2, 2, 6, 2) UpperCamelCase_ = (3, 6, 1_2, 2_4) elif model_size == "small": UpperCamelCase_ = 9_6 UpperCamelCase_ = (2, 2, 1_8, 2) UpperCamelCase_ = (3, 6, 1_2, 2_4) elif model_size == "base": UpperCamelCase_ = 1_2_8 UpperCamelCase_ = (2, 2, 1_8, 2) UpperCamelCase_ = (4, 8, 1_6, 3_2) else: UpperCamelCase_ = 1_9_2 UpperCamelCase_ = (2, 2, 1_8, 2) UpperCamelCase_ = (6, 1_2, 2_4, 4_8) if "to" in swinva_name: UpperCamelCase_ = (1_2, 1_2, 1_2, 6) if ("22k" in swinva_name) and ("to" not in swinva_name): UpperCamelCase_ = 2_1_8_4_1 UpperCamelCase_ = "huggingface/label-files" UpperCamelCase_ = "imagenet-22k-id2label.json" UpperCamelCase_ = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , repo_type="dataset" ) , "r" ) ) UpperCamelCase_ = {int(SCREAMING_SNAKE_CASE_ ): v for k, v in idalabel.items()} UpperCamelCase_ = idalabel UpperCamelCase_ = {v: k for k, v in idalabel.items()} else: UpperCamelCase_ = 1_0_0_0 UpperCamelCase_ = "huggingface/label-files" UpperCamelCase_ = "imagenet-1k-id2label.json" UpperCamelCase_ = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , repo_type="dataset" ) , "r" ) ) UpperCamelCase_ = {int(SCREAMING_SNAKE_CASE_ ): v for k, v in idalabel.items()} UpperCamelCase_ = idalabel UpperCamelCase_ = {v: k for k, v in idalabel.items()} UpperCamelCase_ = img_size UpperCamelCase_ = num_classes UpperCamelCase_ = embed_dim UpperCamelCase_ = depths UpperCamelCase_ = num_heads UpperCamelCase_ = window_size return config def lowerCAmelCase( SCREAMING_SNAKE_CASE_ )-> Optional[Any]: """simple docstring""" if "patch_embed.proj" in name: UpperCamelCase_ = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" ) if "patch_embed.norm" in name: UpperCamelCase_ = name.replace("patch_embed.norm" , "embeddings.norm" ) if "layers" in name: UpperCamelCase_ = "encoder." + name if "attn.proj" in name: UpperCamelCase_ = name.replace("attn.proj" , "attention.output.dense" ) if "attn" in name: UpperCamelCase_ = name.replace("attn" , "attention.self" ) if "norm1" in name: UpperCamelCase_ = name.replace("norm1" , "layernorm_before" ) if "norm2" in name: UpperCamelCase_ = name.replace("norm2" , "layernorm_after" ) if "mlp.fc1" in name: UpperCamelCase_ = name.replace("mlp.fc1" , "intermediate.dense" ) if "mlp.fc2" in name: UpperCamelCase_ = name.replace("mlp.fc2" , "output.dense" ) if "q_bias" in name: UpperCamelCase_ = name.replace("q_bias" , "query.bias" ) if "k_bias" in name: UpperCamelCase_ = name.replace("k_bias" , "key.bias" ) if "v_bias" in name: UpperCamelCase_ = name.replace("v_bias" , "value.bias" ) if "cpb_mlp" in name: UpperCamelCase_ = name.replace("cpb_mlp" , "continuous_position_bias_mlp" ) if name == "norm.weight": UpperCamelCase_ = "layernorm.weight" if name == "norm.bias": UpperCamelCase_ = "layernorm.bias" if "head" in name: UpperCamelCase_ = name.replace("head" , "classifier" ) else: UpperCamelCase_ = "swinv2." + name return name def lowerCAmelCase( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )-> Optional[int]: """simple docstring""" for key in orig_state_dict.copy().keys(): UpperCamelCase_ = orig_state_dict.pop(SCREAMING_SNAKE_CASE_ ) if "mask" in key: continue elif "qkv" in key: UpperCamelCase_ = key.split("." ) UpperCamelCase_ = int(key_split[1] ) UpperCamelCase_ = int(key_split[3] ) UpperCamelCase_ = model.swinva.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size if "weight" in key: UpperCamelCase_ = val[:dim, :] UpperCamelCase_ = val[dim : dim * 2, :] UpperCamelCase_ = val[-dim:, :] else: UpperCamelCase_ = val[:dim] UpperCamelCase_ = val[ dim : dim * 2 ] UpperCamelCase_ = val[-dim:] else: UpperCamelCase_ = val return orig_state_dict def lowerCAmelCase( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )-> Any: """simple docstring""" UpperCamelCase_ = timm.create_model(SCREAMING_SNAKE_CASE_ , pretrained=SCREAMING_SNAKE_CASE_ ) timm_model.eval() UpperCamelCase_ = get_swinva_config(SCREAMING_SNAKE_CASE_ ) UpperCamelCase_ = SwinvaForImageClassification(SCREAMING_SNAKE_CASE_ ) model.eval() UpperCamelCase_ = convert_state_dict(timm_model.state_dict() , SCREAMING_SNAKE_CASE_ ) model.load_state_dict(SCREAMING_SNAKE_CASE_ ) UpperCamelCase_ = "http://images.cocodataset.org/val2017/000000039769.jpg" UpperCamelCase_ = AutoImageProcessor.from_pretrained("microsoft/{}".format(swinva_name.replace("_" , "-" ) ) ) UpperCamelCase_ = Image.open(requests.get(SCREAMING_SNAKE_CASE_ , stream=SCREAMING_SNAKE_CASE_ ).raw ) UpperCamelCase_ = image_processor(images=SCREAMING_SNAKE_CASE_ , return_tensors="pt" ) UpperCamelCase_ = timm_model(inputs["pixel_values"] ) UpperCamelCase_ = model(**SCREAMING_SNAKE_CASE_ ).logits assert torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1E-3 ) print(f"Saving model {swinva_name} to {pytorch_dump_folder_path}" ) model.save_pretrained(SCREAMING_SNAKE_CASE_ ) print(f"Saving image processor to {pytorch_dump_folder_path}" ) image_processor.save_pretrained(SCREAMING_SNAKE_CASE_ ) model.push_to_hub( repo_path_or_name=Path(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , organization="nandwalritik" , commit_message="Add model" , ) if __name__ == "__main__": SCREAMING_SNAKE_CASE :Tuple = argparse.ArgumentParser() # Required parameters parser.add_argument( """--swinv2_name""", default="""swinv2_tiny_patch4_window8_256""", type=str, help="""Name of the Swinv2 timm model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) SCREAMING_SNAKE_CASE :int = parser.parse_args() convert_swinva_checkpoint(args.swinva_name, args.pytorch_dump_folder_path)
628
0
'''simple docstring''' import argparse import glob import importlib.util import os import re import black from doc_builder.style_doc import style_docstrings_in_code # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_copies.py __UpperCAmelCase = """src/diffusers""" __UpperCAmelCase = """.""" # This is to make sure the diffusers module imported is the one in the repo. __UpperCAmelCase = importlib.util.spec_from_file_location( """diffusers""", os.path.join(DIFFUSERS_PATH, """__init__.py"""), submodule_search_locations=[DIFFUSERS_PATH], ) __UpperCAmelCase = spec.loader.load_module() def _lowerCamelCase ( A_ : Union[str, Any] , A_ : Dict ) -> List[str]: '''simple docstring''' return line.startswith(A_ ) or len(A_ ) <= 1 or re.search(R"^\s*\)(\s*->.*:|:)\s*$" , A_ ) is not None def _lowerCamelCase ( A_ : Optional[Any] ) -> Tuple: '''simple docstring''' UpperCamelCase__ : Any =object_name.split("." ) UpperCamelCase__ : List[Any] =0 # First let's find the module where our object lives. UpperCamelCase__ : Optional[int] =parts[i] while i < len(A_ ) and not os.path.isfile(os.path.join(A_ , f'''{module}.py''' ) ): i += 1 if i < len(A_ ): UpperCamelCase__ : List[Any] =os.path.join(A_ , parts[i] ) if i >= len(A_ ): raise ValueError(f'''`object_name` should begin with the name of a module of diffusers but got {object_name}.''' ) with open(os.path.join(A_ , f'''{module}.py''' ) , "r" , encoding="utf-8" , newline="\n" ) as f: UpperCamelCase__ : Union[str, Any] =f.readlines() # Now let's find the class / func in the code! UpperCamelCase__ : str ="" UpperCamelCase__ : Optional[Any] =0 for name in parts[i + 1 :]: while ( line_index < len(A_ ) and re.search(Rf'''^{indent}(class|def)\s+{name}(\(|\:)''' , lines[line_index] ) is None ): line_index += 1 indent += " " line_index += 1 if line_index >= len(A_ ): raise ValueError(f''' {object_name} does not match any function or class in {module}.''' ) # We found the beginning of the class / func, now let's find the end (when the indent diminishes). UpperCamelCase__ : Union[str, Any] =line_index while line_index < len(A_ ) and _should_continue(lines[line_index] , A_ ): line_index += 1 # Clean up empty lines at the end (if any). while len(lines[line_index - 1] ) <= 1: line_index -= 1 UpperCamelCase__ : Tuple =lines[start_index:line_index] return "".join(A_ ) __UpperCAmelCase = re.compile(r"""^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)""") __UpperCAmelCase = re.compile(r"""^\s*(\S+)->(\S+)(\s+.*|$)""") __UpperCAmelCase = re.compile(r"""<FILL\s+[^>]*>""") def _lowerCamelCase ( A_ : Union[str, Any] ) -> Optional[Any]: '''simple docstring''' UpperCamelCase__ : Optional[Any] =code.split("\n" ) UpperCamelCase__ : Any =0 while idx < len(A_ ) and len(lines[idx] ) == 0: idx += 1 if idx < len(A_ ): return re.search(R"^(\s*)\S" , lines[idx] ).groups()[0] return "" def _lowerCamelCase ( A_ : Optional[int] ) -> Any: '''simple docstring''' UpperCamelCase__ : int =len(get_indent(A_ ) ) > 0 if has_indent: UpperCamelCase__ : List[Any] =f'''class Bla:\n{code}''' UpperCamelCase__ : Optional[int] =black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=1_1_9 , preview=A_ ) UpperCamelCase__ : Tuple =black.format_str(A_ , mode=A_ ) UpperCamelCase__ : List[Any] =style_docstrings_in_code(A_ ) return result[len("class Bla:\n" ) :] if has_indent else result def _lowerCamelCase ( A_ : Union[str, Any] , A_ : List[str]=False ) -> Union[str, Any]: '''simple docstring''' with open(A_ , "r" , encoding="utf-8" , newline="\n" ) as f: UpperCamelCase__ : List[Any] =f.readlines() UpperCamelCase__ : List[str] =[] UpperCamelCase__ : List[str] =0 # Not a for loop cause `lines` is going to change (if `overwrite=True`). while line_index < len(A_ ): UpperCamelCase__ : Dict =_re_copy_warning.search(lines[line_index] ) if search is None: line_index += 1 continue # There is some copied code here, let's retrieve the original. UpperCamelCase__ : Optional[Any] =search.groups() UpperCamelCase__ : List[str] =find_code_in_diffusers(A_ ) UpperCamelCase__ : str =get_indent(A_ ) UpperCamelCase__ : Optional[int] =line_index + 1 if indent == theoretical_indent else line_index + 2 UpperCamelCase__ : str =theoretical_indent UpperCamelCase__ : int =start_index # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment. UpperCamelCase__ : List[str] =True while line_index < len(A_ ) and should_continue: line_index += 1 if line_index >= len(A_ ): break UpperCamelCase__ : int =lines[line_index] UpperCamelCase__ : List[str] =_should_continue(A_ , A_ ) and re.search(f'''^{indent}# End copy''' , A_ ) is None # Clean up empty lines at the end (if any). while len(lines[line_index - 1] ) <= 1: line_index -= 1 UpperCamelCase__ : str =lines[start_index:line_index] UpperCamelCase__ : Dict ="".join(A_ ) # Remove any nested `Copied from` comments to avoid circular copies UpperCamelCase__ : Optional[int] =[line for line in theoretical_code.split("\n" ) if _re_copy_warning.search(A_ ) is None] UpperCamelCase__ : Dict ="\n".join(A_ ) # Before comparing, use the `replace_pattern` on the original code. if len(A_ ) > 0: UpperCamelCase__ : Any =replace_pattern.replace("with" , "" ).split("," ) UpperCamelCase__ : Union[str, Any] =[_re_replace_pattern.search(A_ ) for p in patterns] for pattern in patterns: if pattern is None: continue UpperCamelCase__ : str =pattern.groups() UpperCamelCase__ : List[str] =re.sub(A_ , A_ , A_ ) if option.strip() == "all-casing": UpperCamelCase__ : Any =re.sub(obja.lower() , obja.lower() , A_ ) UpperCamelCase__ : List[str] =re.sub(obja.upper() , obja.upper() , A_ ) # Blackify after replacement. To be able to do that, we need the header (class or function definition) # from the previous line UpperCamelCase__ : Dict =blackify(lines[start_index - 1] + theoretical_code ) UpperCamelCase__ : int =theoretical_code[len(lines[start_index - 1] ) :] # Test for a diff and act accordingly. if observed_code != theoretical_code: diffs.append([object_name, start_index] ) if overwrite: UpperCamelCase__ : List[Any] =lines[:start_index] + [theoretical_code] + lines[line_index:] UpperCamelCase__ : List[Any] =start_index + 1 if overwrite and len(A_ ) > 0: # Warn the user a file has been modified. print(f'''Detected changes, rewriting {filename}.''' ) with open(A_ , "w" , encoding="utf-8" , newline="\n" ) as f: f.writelines(A_ ) return diffs def _lowerCamelCase ( A_ : bool = False ) -> int: '''simple docstring''' UpperCamelCase__ : List[str] =glob.glob(os.path.join(A_ , "**/*.py" ) , recursive=A_ ) UpperCamelCase__ : int =[] for filename in all_files: UpperCamelCase__ : str =is_copy_consistent(A_ , A_ ) diffs += [f'''- {filename}: copy does not match {d[0]} at line {d[1]}''' for d in new_diffs] if not overwrite and len(A_ ) > 0: UpperCamelCase__ : Any ="\n".join(A_ ) raise Exception( "Found the following copy inconsistencies:\n" + diff + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them." ) if __name__ == "__main__": __UpperCAmelCase = argparse.ArgumentParser() parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""") __UpperCAmelCase = parser.parse_args() check_copies(args.fix_and_overwrite)
717
from math import factorial __UpperCAmelCase = {str(digit): factorial(digit) for digit in range(10)} def _lowerCamelCase ( A_ : int ) -> int: '''simple docstring''' if not isinstance(A_ , A_ ): raise TypeError("Parameter number must be int" ) if number < 0: raise ValueError("Parameter number must be greater than or equal to 0" ) # Converts number in string to iterate on its digits and adds its factorial. return sum(DIGIT_FACTORIAL[digit] for digit in str(A_ ) ) def _lowerCamelCase ( A_ : int = 6_0 , A_ : int = 1_0_0_0_0_0_0 ) -> int: '''simple docstring''' if not isinstance(A_ , A_ ) or not isinstance(A_ , A_ ): raise TypeError("Parameters chain_length and number_limit must be int" ) if chain_length <= 0 or number_limit <= 0: raise ValueError( "Parameters chain_length and number_limit must be greater than 0" ) # the counter for the chains with the exact desired length UpperCamelCase__ : str =0 # the cached sizes of the previous chains UpperCamelCase__ : dict[int, int] ={} for start_chain_element in range(1 , A_ ): # The temporary set will contain the elements of the chain UpperCamelCase__ : Any =set() UpperCamelCase__ : Optional[Any] =0 # Stop computing the chain when you find a cached size, a repeating item or the # length is greater then the desired one. UpperCamelCase__ : str =start_chain_element while ( chain_element not in chain_sets_lengths and chain_element not in chain_set and chain_set_length <= chain_length ): chain_set.add(A_ ) chain_set_length += 1 UpperCamelCase__ : Tuple =digit_factorial_sum(A_ ) if chain_element in chain_sets_lengths: chain_set_length += chain_sets_lengths[chain_element] UpperCamelCase__ : List[str] =chain_set_length # If chain contains the exact amount of elements increase the counter if chain_set_length == chain_length: chains_counter += 1 return chains_counter if __name__ == "__main__": import doctest doctest.testmod() print(F"""{solution()}""")
582
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, is_vision_available, ) snake_case__ : str = { """configuration_owlvit""": [ """OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """OwlViTConfig""", """OwlViTOnnxConfig""", """OwlViTTextConfig""", """OwlViTVisionConfig""", ], """processing_owlvit""": ["""OwlViTProcessor"""], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case__ : List[str] = ["""OwlViTFeatureExtractor"""] snake_case__ : Tuple = ["""OwlViTImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case__ : Tuple = [ """OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST""", """OwlViTModel""", """OwlViTPreTrainedModel""", """OwlViTTextModel""", """OwlViTVisionModel""", """OwlViTForObjectDetection""", ] if TYPE_CHECKING: from .configuration_owlvit import ( OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, OwlViTConfig, OwlViTOnnxConfig, OwlViTTextConfig, OwlViTVisionConfig, ) from .processing_owlvit import OwlViTProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_owlvit import OwlViTFeatureExtractor from .image_processing_owlvit import OwlViTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_owlvit import ( OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST, OwlViTForObjectDetection, OwlViTModel, OwlViTPreTrainedModel, OwlViTTextModel, OwlViTVisionModel, ) else: import sys snake_case__ : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
23
from __future__ import annotations from collections.abc import Iterator from typing import Generic, TypeVar UpperCAmelCase = TypeVar('''T''') class A_ ( Generic[T] ): '''simple docstring''' def __init__( self , snake_case ): lowercase = data lowercase = None def __str__( self ): return F'''{self.data}''' class A_ ( Generic[T] ): '''simple docstring''' def __init__( self ): lowercase = None def __iter__( self ): lowercase = self.top while node: yield node.data lowercase = node.next def __str__( self ): return "->".join([str(snake_case ) for item in self] ) def __len__( self ): return len(tuple(iter(self ) ) ) def SCREAMING_SNAKE_CASE__ ( self ): return self.top is None def SCREAMING_SNAKE_CASE__ ( self , snake_case ): lowercase = Node(snake_case ) if not self.is_empty(): lowercase = self.top lowercase = node def SCREAMING_SNAKE_CASE__ ( self ): if self.is_empty(): raise IndexError('pop from empty stack' ) assert isinstance(self.top , snake_case ) lowercase = self.top lowercase = self.top.next return pop_node.data def SCREAMING_SNAKE_CASE__ ( self ): if self.is_empty(): raise IndexError('peek from empty stack' ) assert self.top is not None return self.top.data def SCREAMING_SNAKE_CASE__ ( self ): lowercase = None if __name__ == "__main__": from doctest import testmod testmod()
84
0
"""simple docstring""" import platform from argparse import ArgumentParser import huggingface_hub from .. import __version__ as version from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available from . import BaseDiffusersCLICommand def a__ ( a : int ): """simple docstring""" return EnvironmentCommand() class _UpperCAmelCase ( _snake_case): @staticmethod def lowerCamelCase__ ( snake_case_ ): _snake_case : List[str] = parser.add_parser("env" ) download_parser.set_defaults(func=snake_case_ ) def lowerCamelCase__ ( self ): _snake_case : Union[str, Any] = huggingface_hub.__version__ _snake_case : int = "not installed" _snake_case : Tuple = "NA" if is_torch_available(): import torch _snake_case : int = torch.__version__ _snake_case : int = torch.cuda.is_available() _snake_case : List[str] = "not installed" if is_transformers_available(): import transformers _snake_case : Optional[Any] = transformers.__version__ _snake_case : Tuple = "not installed" if is_accelerate_available(): import accelerate _snake_case : Optional[int] = accelerate.__version__ _snake_case : List[str] = "not installed" if is_xformers_available(): import xformers _snake_case : Any = xformers.__version__ _snake_case : int = { "`diffusers` version": version, "Platform": platform.platform(), "Python version": platform.python_version(), "PyTorch version (GPU?)": F'{pt_version} ({pt_cuda_available})', "Huggingface_hub version": hub_version, "Transformers version": transformers_version, "Accelerate version": accelerate_version, "xFormers version": xformers_version, "Using GPU in script?": "<fill in>", "Using distributed or parallel set-up in script?": "<fill in>", } print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n" ) print(self.format_dict(snake_case_ ) ) return info @staticmethod def lowerCamelCase__ ( snake_case_ ): return "\n".join([F'- {prop}: {val}' for prop, val in d.items()] ) + "\n"
87
"""simple docstring""" def a__ ( a : int ): """simple docstring""" if not isinstance(a , a ): raise TypeError("Input value must be an 'int' type" ) _snake_case : Union[str, Any] = 0 while number: position += 1 number >>= 1 return position if __name__ == "__main__": import doctest doctest.testmod()
87
1
'''simple docstring''' import copy import inspect import unittest from transformers import PretrainedConfig, SwiftFormerConfig from transformers.testing_utils import ( require_torch, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import SwiftFormerForImageClassification, SwiftFormerModel from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class _snake_case : """simple docstring""" def __init__( self , UpperCAmelCase__ , UpperCAmelCase__=13 , UpperCAmelCase__=3 , UpperCAmelCase__=True , UpperCAmelCase__=True , UpperCAmelCase__=0.1 , UpperCAmelCase__=0.1 , UpperCAmelCase__=224 , UpperCAmelCase__=1000 , UpperCAmelCase__=[3, 3, 6, 4] , UpperCAmelCase__=[48, 56, 112, 220] , ) -> List[str]: a_ = parent a_ = batch_size a_ = num_channels a_ = is_training a_ = use_labels a_ = hidden_dropout_prob a_ = attention_probs_dropout_prob a_ = num_labels a_ = image_size a_ = layer_depths a_ = embed_dims def __SCREAMING_SNAKE_CASE ( self ) -> Tuple: a_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) a_ = None if self.use_labels: a_ = ids_tensor([self.batch_size] , self.num_labels ) a_ = self.get_config() return config, pixel_values, labels def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]: return SwiftFormerConfig( depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act='gelu' , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=UpperCAmelCase__ , layer_scale_init_value=1e-5 , ) def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) -> Optional[Any]: a_ = SwiftFormerModel(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() a_ = model(UpperCAmelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) ) def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) -> List[str]: a_ = self.num_labels a_ = SwiftFormerForImageClassification(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() a_ = model(UpperCAmelCase__ , labels=UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) a_ = SwiftFormerForImageClassification(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() a_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) a_ = model(UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]: ((a_) , (a_) , (a_)) = self.prepare_config_and_inputs() a_ = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class _snake_case ( snake_case , snake_case , unittest.TestCase ): """simple docstring""" _UpperCamelCase = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else () _UpperCamelCase = ( {"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification} if is_torch_available() else {} ) _UpperCamelCase = False _UpperCamelCase = False _UpperCamelCase = False _UpperCamelCase = False _UpperCamelCase = False def __SCREAMING_SNAKE_CASE ( self ) -> List[str]: a_ = SwiftFormerModelTester(self ) a_ = ConfigTester( self , config_class=UpperCAmelCase__ , has_text_modality=UpperCAmelCase__ , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , ) def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]: self.config_tester.run_common_tests() @unittest.skip(reason='SwiftFormer does not use inputs_embeds' ) def __SCREAMING_SNAKE_CASE ( self ) -> Tuple: pass def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]: a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a_ = model_class(UpperCAmelCase__ ) a_ = model.get_output_embeddings() self.assertTrue(x is None or isinstance(UpperCAmelCase__ , nn.Linear ) ) def __SCREAMING_SNAKE_CASE ( self ) -> Any: a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a_ = model_class(UpperCAmelCase__ ) a_ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic a_ = [*signature.parameters.keys()] a_ = ['pixel_values'] self.assertListEqual(arg_names[:1] , UpperCAmelCase__ ) def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]: a_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase__ ) def __SCREAMING_SNAKE_CASE ( self ) -> Tuple: a_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase__ ) @slow def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]: for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a_ = SwiftFormerModel.from_pretrained(UpperCAmelCase__ ) self.assertIsNotNone(UpperCAmelCase__ ) @unittest.skip(reason='SwiftFormer does not output attentions' ) def __SCREAMING_SNAKE_CASE ( self ) -> Dict: pass def __SCREAMING_SNAKE_CASE ( self ) -> Tuple: def check_hidden_states_output(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ): a_ = model_class(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() with torch.no_grad(): a_ = model(**self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ ) ) a_ = outputs.hidden_states a_ = 8 self.assertEqual(len(UpperCAmelCase__ ) , UpperCAmelCase__ ) # TODO # SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width) # with the width and height being successively divided by 2, after every 2 blocks for i in range(len(UpperCAmelCase__ ) ): self.assertEqual( hidden_states[i].shape , torch.Size( [ self.model_tester.batch_size, self.model_tester.embed_dims[i // 2], (self.model_tester.image_size // 4) // 2 ** (i // 2), (self.model_tester.image_size // 4) // 2 ** (i // 2), ] ) , ) a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a_ = True check_hidden_states_output(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] a_ = True check_hidden_states_output(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]: def _config_zero_init(UpperCAmelCase__ ): a_ = copy.deepcopy(UpperCAmelCase__ ) for key in configs_no_init.__dict__.keys(): if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key: setattr(UpperCAmelCase__ , UpperCAmelCase__ , 1e-10 ) if isinstance(getattr(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) , UpperCAmelCase__ ): a_ = _config_zero_init(getattr(UpperCAmelCase__ , UpperCAmelCase__ ) ) setattr(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) return configs_no_init a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common() a_ = _config_zero_init(UpperCAmelCase__ ) for model_class in self.all_model_classes: a_ = model_class(config=UpperCAmelCase__ ) for name, param in model.named_parameters(): if param.requires_grad: self.assertIn( ((param.data.mean() * 1e9) / 1e9).round().item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , ) @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' ) def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]: pass def a ( ) -> Optional[Any]: """simple docstring""" a_ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class _snake_case ( unittest.TestCase ): """simple docstring""" @cached_property def __SCREAMING_SNAKE_CASE ( self ) -> int: return ViTImageProcessor.from_pretrained('MBZUAI/swiftformer-xs' ) if is_vision_available() else None @slow def __SCREAMING_SNAKE_CASE ( self ) -> Dict: a_ = SwiftFormerForImageClassification.from_pretrained('MBZUAI/swiftformer-xs' ).to(UpperCAmelCase__ ) a_ = self.default_image_processor a_ = prepare_img() a_ = image_processor(images=UpperCAmelCase__ , return_tensors='pt' ).to(UpperCAmelCase__ ) # forward pass with torch.no_grad(): a_ = model(**UpperCAmelCase__ ) # verify the logits a_ = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , UpperCAmelCase__ ) a_ = torch.tensor([[-2.1703e00, 2.1107e00, -2.0811e00]] ).to(UpperCAmelCase__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCAmelCase__ , atol=1e-4 ) )
697
'''simple docstring''' from __future__ import annotations def a ( _UpperCAmelCase ) -> bool: """simple docstring""" a_ = len(_UpperCAmelCase ) # We need to create solution object to save path. a_ = [[0 for _ in range(_UpperCAmelCase )] for _ in range(_UpperCAmelCase )] a_ = run_maze(_UpperCAmelCase , 0 , 0 , _UpperCAmelCase ) if solved: print('\n'.join(str(_UpperCAmelCase ) for row in solutions ) ) else: print('No solution exists!' ) return solved def a ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> bool: """simple docstring""" a_ = len(_UpperCAmelCase ) # Final check point. if i == j == (size - 1): a_ = 1 return True a_ = (not i < 0) and (not j < 0) # Check lower bounds a_ = (i < size) and (j < size) # Check upper bounds if lower_flag and upper_flag: # check for already visited and block points. a_ = (not solutions[i][j]) and (not maze[i][j]) if block_flag: # check visited a_ = 1 # check for directions if ( run_maze(_UpperCAmelCase , i + 1 , _UpperCAmelCase , _UpperCAmelCase ) or run_maze(_UpperCAmelCase , _UpperCAmelCase , j + 1 , _UpperCAmelCase ) or run_maze(_UpperCAmelCase , i - 1 , _UpperCAmelCase , _UpperCAmelCase ) or run_maze(_UpperCAmelCase , _UpperCAmelCase , j - 1 , _UpperCAmelCase ) ): return True a_ = 0 return False return False if __name__ == "__main__": import doctest doctest.testmod()
697
1
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # this script dumps information about the environment import os import platform import sys lowerCAmelCase_ = """3""" print("""Python version:""", sys.version) print("""OS platform:""", platform.platform()) print("""OS architecture:""", platform.machine()) try: import torch print("""Torch version:""", torch.__version__) print("""Cuda available:""", torch.cuda.is_available()) print("""Cuda version:""", torch.version.cuda) print("""CuDNN version:""", torch.backends.cudnn.version()) print("""Number of GPUs available:""", torch.cuda.device_count()) except ImportError: print("""Torch version:""", None) try: import transformers print("""transformers version:""", transformers.__version__) except ImportError: print("""transformers version:""", None)
470
lowerCAmelCase_ = [ [0, 16, 13, 0, 0, 0], [0, 0, 10, 12, 0, 0], [0, 4, 0, 0, 14, 0], [0, 0, 9, 0, 0, 20], [0, 0, 0, 7, 0, 4], [0, 0, 0, 0, 0, 0], ] def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> int: # Return True if there is node that has not iterated. lowerCAmelCase__ : List[str] = [False] * len(UpperCamelCase ) lowerCAmelCase__ : int = [s] lowerCAmelCase__ : Dict = True while queue: lowerCAmelCase__ : Union[str, Any] = queue.pop(0 ) for ind in range(len(graph[u] ) ): if visited[ind] is False and graph[u][ind] > 0: queue.append(UpperCamelCase ) lowerCAmelCase__ : List[str] = True lowerCAmelCase__ : int = u return visited[t] def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Optional[Any]: lowerCAmelCase__ : Dict = [-1] * (len(UpperCamelCase )) lowerCAmelCase__ : Dict = 0 lowerCAmelCase__ : List[str] = [] lowerCAmelCase__ : Tuple = [i[:] for i in graph] # Record original cut, copy. while bfs(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ): lowerCAmelCase__ : Optional[Any] = float('''Inf''' ) lowerCAmelCase__ : Optional[Any] = sink while s != source: # Find the minimum value in select path lowerCAmelCase__ : Optional[Any] = min(UpperCamelCase , graph[parent[s]][s] ) lowerCAmelCase__ : Union[str, Any] = parent[s] max_flow += path_flow lowerCAmelCase__ : List[str] = sink while v != source: lowerCAmelCase__ : Union[str, Any] = parent[v] graph[u][v] -= path_flow graph[v][u] += path_flow lowerCAmelCase__ : Dict = parent[v] for i in range(len(UpperCamelCase ) ): for j in range(len(graph[0] ) ): if graph[i][j] == 0 and temp[i][j] > 0: res.append((i, j) ) return res if __name__ == "__main__": print(mincut(test_graph, source=0, sink=5))
470
1
def _a ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Optional[int]: '''simple docstring''' SCREAMING_SNAKE_CASE__ : Union[str, Any] = [False] * len(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = [] queue.append(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = True while queue: SCREAMING_SNAKE_CASE__ : Union[str, Any] = queue.pop(0 ) for ind in range(len(graph[u] ) ): if visited[ind] is False and graph[u][ind] > 0: queue.append(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = True SCREAMING_SNAKE_CASE__ : Optional[int] = u return visited[t] def _a ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[Any] ) -> Union[str, Any]: '''simple docstring''' SCREAMING_SNAKE_CASE__ : Union[str, Any] = [-1] * (len(SCREAMING_SNAKE_CASE__ )) SCREAMING_SNAKE_CASE__ : int = 0 while bfs(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): SCREAMING_SNAKE_CASE__ : int = float("Inf" ) SCREAMING_SNAKE_CASE__ : List[Any] = sink while s != source: # Find the minimum value in select path SCREAMING_SNAKE_CASE__ : str = min(SCREAMING_SNAKE_CASE__ , graph[parent[s]][s] ) SCREAMING_SNAKE_CASE__ : List[str] = parent[s] max_flow += path_flow SCREAMING_SNAKE_CASE__ : Tuple = sink while v != source: SCREAMING_SNAKE_CASE__ : Dict = parent[v] graph[u][v] -= path_flow graph[v][u] += path_flow SCREAMING_SNAKE_CASE__ : Dict = parent[v] return max_flow _lowerCamelCase : Union[str, Any] = [ [0, 1_6, 1_3, 0, 0, 0], [0, 0, 1_0, 1_2, 0, 0], [0, 4, 0, 0, 1_4, 0], [0, 0, 9, 0, 0, 2_0], [0, 0, 0, 7, 0, 4], [0, 0, 0, 0, 0, 0], ] _lowerCamelCase , _lowerCamelCase : List[str] = 0, 5 print(ford_fulkerson(graph, source, sink))
663
import json import os import unittest from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class lowerCamelCase (__lowerCamelCase , unittest.TestCase ): """simple docstring""" UpperCAmelCase_ = GPTaTokenizer UpperCAmelCase_ = GPTaTokenizerFast UpperCAmelCase_ = True UpperCAmelCase_ = {"add_prefix_space": True} UpperCAmelCase_ = False def A_ ( self : Optional[int] ) -> Union[str, Any]: """simple docstring""" super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt SCREAMING_SNAKE_CASE__ : Any = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>", "<|endoftext|>", ] SCREAMING_SNAKE_CASE__ : int = dict(zip(_UpperCAmelCase, range(len(_UpperCAmelCase ) ) ) ) SCREAMING_SNAKE_CASE__ : str = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""] SCREAMING_SNAKE_CASE__ : Any = {"unk_token": "<unk>"} SCREAMING_SNAKE_CASE__ : Optional[int] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"] ) SCREAMING_SNAKE_CASE__ : Tuple = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file, "w", encoding="utf-8" ) as fp: fp.write(json.dumps(_UpperCAmelCase ) + "\n" ) with open(self.merges_file, "w", encoding="utf-8" ) as fp: fp.write("\n".join(_UpperCAmelCase ) ) def A_ ( self : Tuple, **_UpperCAmelCase : str ) -> str: """simple docstring""" kwargs.update(self.special_tokens_map ) return GPTaTokenizer.from_pretrained(self.tmpdirname, **_UpperCAmelCase ) def A_ ( self : int, **_UpperCAmelCase : Union[str, Any] ) -> int: """simple docstring""" kwargs.update(self.special_tokens_map ) return GPTaTokenizerFast.from_pretrained(self.tmpdirname, **_UpperCAmelCase ) def A_ ( self : Tuple, _UpperCAmelCase : Union[str, Any] ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : int = "lower newer" SCREAMING_SNAKE_CASE__ : List[Any] = "lower newer" return input_text, output_text def A_ ( self : int ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = GPTaTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map ) SCREAMING_SNAKE_CASE__ : Tuple = "lower newer" SCREAMING_SNAKE_CASE__ : Optional[Any] = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"] SCREAMING_SNAKE_CASE__ : Dict = tokenizer.tokenize(_UpperCAmelCase, add_prefix_space=_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : List[Any] = tokens + [tokenizer.unk_token] SCREAMING_SNAKE_CASE__ : Dict = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9] self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCAmelCase ), _UpperCAmelCase ) def A_ ( self : Dict ) -> str: """simple docstring""" if not self.test_rust_tokenizer: return SCREAMING_SNAKE_CASE__ : Tuple = self.get_tokenizer() SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_rust_tokenizer(add_prefix_space=_UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : List[str] = "lower newer" # Testing tokenization SCREAMING_SNAKE_CASE__ : List[str] = tokenizer.tokenize(_UpperCAmelCase, add_prefix_space=_UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : Optional[Any] = rust_tokenizer.tokenize(_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase ) # Testing conversion to ids without special tokens SCREAMING_SNAKE_CASE__ : Any = tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase, add_prefix_space=_UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : Optional[int] = rust_tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase ) # Testing conversion to ids with special tokens SCREAMING_SNAKE_CASE__ : Tuple = self.get_rust_tokenizer(add_prefix_space=_UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : Dict = tokenizer.encode(_UpperCAmelCase, add_prefix_space=_UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = rust_tokenizer.encode(_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase ) # Testing the unknown token SCREAMING_SNAKE_CASE__ : Dict = tokens + [rust_tokenizer.unk_token] SCREAMING_SNAKE_CASE__ : str = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9] self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(_UpperCAmelCase ), _UpperCAmelCase ) def A_ ( self : Tuple, *_UpperCAmelCase : List[Any], **_UpperCAmelCase : Union[str, Any] ) -> Optional[int]: """simple docstring""" # It's very difficult to mix/test pretokenization with byte-level # And get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string) pass def A_ ( self : Optional[Any], _UpperCAmelCase : int=1_5 ) -> List[str]: """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): SCREAMING_SNAKE_CASE__ : Any = self.rust_tokenizer_class.from_pretrained(_UpperCAmelCase, **_UpperCAmelCase ) # Simple input SCREAMING_SNAKE_CASE__ : Optional[Any] = "This is a simple input" SCREAMING_SNAKE_CASE__ : List[str] = ["This is a simple input 1", "This is a simple input 2"] SCREAMING_SNAKE_CASE__ : Any = ("This is a simple input", "This is a pair") SCREAMING_SNAKE_CASE__ : List[Any] = [ ("This is a simple input 1", "This is a simple input 2"), ("This is a simple pair 1", "This is a simple pair 2"), ] # Simple input tests self.assertRaises(_UpperCAmelCase, tokenizer_r.encode, _UpperCAmelCase, max_length=_UpperCAmelCase, padding="max_length" ) # Simple input self.assertRaises(_UpperCAmelCase, tokenizer_r.encode_plus, _UpperCAmelCase, max_length=_UpperCAmelCase, padding="max_length" ) # Simple input self.assertRaises( _UpperCAmelCase, tokenizer_r.batch_encode_plus, _UpperCAmelCase, max_length=_UpperCAmelCase, padding="max_length", ) # Pair input self.assertRaises(_UpperCAmelCase, tokenizer_r.encode, _UpperCAmelCase, max_length=_UpperCAmelCase, padding="max_length" ) # Pair input self.assertRaises(_UpperCAmelCase, tokenizer_r.encode_plus, _UpperCAmelCase, max_length=_UpperCAmelCase, padding="max_length" ) # Pair input self.assertRaises( _UpperCAmelCase, tokenizer_r.batch_encode_plus, _UpperCAmelCase, max_length=_UpperCAmelCase, padding="max_length", ) def A_ ( self : Tuple ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = GPTaTokenizer.from_pretrained(self.tmpdirname, pad_token="<pad>" ) # Simple input SCREAMING_SNAKE_CASE__ : Union[str, Any] = "This is a simple input" SCREAMING_SNAKE_CASE__ : Dict = ["This is a simple input looooooooong", "This is a simple input"] SCREAMING_SNAKE_CASE__ : List[str] = ("This is a simple input", "This is a pair") SCREAMING_SNAKE_CASE__ : int = [ ("This is a simple input loooooong", "This is a simple input"), ("This is a simple pair loooooong", "This is a simple pair"), ] SCREAMING_SNAKE_CASE__ : Tuple = tokenizer.pad_token_id SCREAMING_SNAKE_CASE__ : Tuple = tokenizer(_UpperCAmelCase, padding="max_length", max_length=3_0, return_tensors="np" ) SCREAMING_SNAKE_CASE__ : Tuple = tokenizer(_UpperCAmelCase, padding=_UpperCAmelCase, truncate=_UpperCAmelCase, return_tensors="np" ) SCREAMING_SNAKE_CASE__ : Any = tokenizer(*_UpperCAmelCase, padding="max_length", max_length=6_0, return_tensors="np" ) SCREAMING_SNAKE_CASE__ : Tuple = tokenizer(_UpperCAmelCase, padding=_UpperCAmelCase, truncate=_UpperCAmelCase, return_tensors="np" ) # s # test single string max_length padding self.assertEqual(out_s["input_ids"].shape[-1], 3_0 ) self.assertTrue(pad_token_id in out_s["input_ids"] ) self.assertTrue(0 in out_s["attention_mask"] ) # s2 # test automatic padding self.assertEqual(out_sa["input_ids"].shape[-1], 3_3 ) # long slice doesn't have padding self.assertFalse(pad_token_id in out_sa["input_ids"][0] ) self.assertFalse(0 in out_sa["attention_mask"][0] ) # short slice does have padding self.assertTrue(pad_token_id in out_sa["input_ids"][1] ) self.assertTrue(0 in out_sa["attention_mask"][1] ) # p # test single pair max_length padding self.assertEqual(out_p["input_ids"].shape[-1], 6_0 ) self.assertTrue(pad_token_id in out_p["input_ids"] ) self.assertTrue(0 in out_p["attention_mask"] ) # p2 # test automatic padding pair self.assertEqual(out_pa["input_ids"].shape[-1], 5_2 ) # long slice pair doesn't have padding self.assertFalse(pad_token_id in out_pa["input_ids"][0] ) self.assertFalse(0 in out_pa["attention_mask"][0] ) # short slice pair does have padding self.assertTrue(pad_token_id in out_pa["input_ids"][1] ) self.assertTrue(0 in out_pa["attention_mask"][1] ) def A_ ( self : str ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = "$$$" SCREAMING_SNAKE_CASE__ : List[str] = GPTaTokenizer.from_pretrained(self.tmpdirname, bos_token=_UpperCAmelCase, add_bos_token=_UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : Optional[Any] = "This is a simple input" SCREAMING_SNAKE_CASE__ : Optional[Any] = ["This is a simple input 1", "This is a simple input 2"] SCREAMING_SNAKE_CASE__ : Optional[Any] = tokenizer.bos_token_id SCREAMING_SNAKE_CASE__ : Optional[Any] = tokenizer(_UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : List[str] = tokenizer(_UpperCAmelCase ) self.assertEqual(out_s.input_ids[0], _UpperCAmelCase ) self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) ) SCREAMING_SNAKE_CASE__ : List[str] = tokenizer.decode(out_s.input_ids ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = tokenizer.batch_decode(out_sa.input_ids ) self.assertEqual(decode_s.split()[0], _UpperCAmelCase ) self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) ) def A_ ( self : List[Any] ) -> Optional[Any]: """simple docstring""" pass def A_ ( self : Dict ) -> str: """simple docstring""" # TODO: change to self.get_tokenizers() when the fast version is implemented SCREAMING_SNAKE_CASE__ : Any = [self.get_tokenizer(do_lower_case=_UpperCAmelCase, add_bos_token=_UpperCAmelCase )] for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): SCREAMING_SNAKE_CASE__ : List[Any] = "Encode this." SCREAMING_SNAKE_CASE__ : Optional[Any] = "This one too please." SCREAMING_SNAKE_CASE__ : str = tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase ) encoded_sequence += tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : Dict = tokenizer.encode_plus( _UpperCAmelCase, _UpperCAmelCase, add_special_tokens=_UpperCAmelCase, return_special_tokens_mask=_UpperCAmelCase, ) SCREAMING_SNAKE_CASE__ : Any = encoded_sequence_dict["input_ids"] SCREAMING_SNAKE_CASE__ : Any = encoded_sequence_dict["special_tokens_mask"] self.assertEqual(len(_UpperCAmelCase ), len(_UpperCAmelCase ) ) SCREAMING_SNAKE_CASE__ : Optional[Any] = [ (x if not special_tokens_mask[i] else None) for i, x in enumerate(_UpperCAmelCase ) ] SCREAMING_SNAKE_CASE__ : List[Any] = [x for x in filtered_sequence if x is not None] self.assertEqual(_UpperCAmelCase, _UpperCAmelCase ) @require_tokenizers class lowerCamelCase (unittest.TestCase ): """simple docstring""" def A_ ( self : Optional[Any] ) -> int: """simple docstring""" # More context: # https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1 # https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519 # https://github.com/huggingface/transformers/pull/17088#discussion_r871246439 SCREAMING_SNAKE_CASE__ : Optional[Any] = AutoTokenizer.from_pretrained("facebook/opt-350m", from_slow=_UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : Dict = "A photo of a cat" SCREAMING_SNAKE_CASE__ : Optional[Any] = tokenizer.encode( _UpperCAmelCase, ) self.assertEqual(_UpperCAmelCase, [2, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] ) tokenizer.save_pretrained("test_opt" ) SCREAMING_SNAKE_CASE__ : List[Any] = AutoTokenizer.from_pretrained("./test_opt" ) SCREAMING_SNAKE_CASE__ : Tuple = tokenizer.encode( _UpperCAmelCase, ) self.assertEqual(_UpperCAmelCase, [2, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] ) def A_ ( self : Tuple ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = AutoTokenizer.from_pretrained("facebook/opt-350m", use_slow=_UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = "A photo of a cat" SCREAMING_SNAKE_CASE__ : List[str] = tokenizer.encode( _UpperCAmelCase, ) # Same as above self.assertEqual(_UpperCAmelCase, [2, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] ) @unittest.skip("This test is failing because of a bug in the fast tokenizer" ) def A_ ( self : Optional[Any] ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ : str = AutoTokenizer.from_pretrained("facebook/opt-350m", from_slow=_UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : List[Any] = "bos" SCREAMING_SNAKE_CASE__ : Optional[int] = tokenizer.get_vocab()["bos"] SCREAMING_SNAKE_CASE__ : Tuple = "A photo of a cat" SCREAMING_SNAKE_CASE__ : Dict = tokenizer.encode( _UpperCAmelCase, ) # We changed the bos token self.assertEqual(_UpperCAmelCase, [3_1_9_5_7, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] ) tokenizer.save_pretrained("./tok" ) SCREAMING_SNAKE_CASE__ : Optional[int] = AutoTokenizer.from_pretrained("./tok" ) self.assertTrue(tokenizer.is_fast ) SCREAMING_SNAKE_CASE__ : Tuple = tokenizer.encode( _UpperCAmelCase, ) self.assertEqual(_UpperCAmelCase, [3_1_9_5_7, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
663
1
"""simple docstring""" from ....configuration_utils import PretrainedConfig from ....utils import logging SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ = { "speechbrain/m-ctc-t-large": "https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json", # See all M-CTC-T models at https://huggingface.co/models?filter=mctct } class lowerCAmelCase_ ( lowercase__ ): """simple docstring""" _lowerCAmelCase : str = """mctct""" def __init__( self , lowerCAmelCase=80_65 , lowerCAmelCase=15_36 , lowerCAmelCase=36 , lowerCAmelCase=61_44 , lowerCAmelCase=4 , lowerCAmelCase=3_84 , lowerCAmelCase=9_20 , lowerCAmelCase=1E-5 , lowerCAmelCase=0.3 , lowerCAmelCase="relu" , lowerCAmelCase=0.02 , lowerCAmelCase=0.3 , lowerCAmelCase=0.3 , lowerCAmelCase=1 , lowerCAmelCase=0 , lowerCAmelCase=2 , lowerCAmelCase=1 , lowerCAmelCase=0.3 , lowerCAmelCase=1 , lowerCAmelCase=(7,) , lowerCAmelCase=(3,) , lowerCAmelCase=80 , lowerCAmelCase=1 , lowerCAmelCase=None , lowerCAmelCase="sum" , lowerCAmelCase=False , **lowerCAmelCase , ): """simple docstring""" super().__init__(**UpperCAmelCase__ , pad_token_id=UpperCAmelCase__ , bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ ) snake_case = vocab_size snake_case = hidden_size snake_case = num_hidden_layers snake_case = intermediate_size snake_case = num_attention_heads snake_case = attention_head_dim snake_case = max_position_embeddings snake_case = layer_norm_eps snake_case = layerdrop snake_case = hidden_act snake_case = initializer_range snake_case = hidden_dropout_prob snake_case = attention_probs_dropout_prob snake_case = pad_token_id snake_case = bos_token_id snake_case = eos_token_id snake_case = conv_glu_dim snake_case = conv_dropout snake_case = num_conv_layers snake_case = input_feat_per_channel snake_case = input_channels snake_case = conv_channels snake_case = ctc_loss_reduction snake_case = ctc_zero_infinity # prevents config testing fail with exporting to json snake_case = list(UpperCAmelCase__ ) snake_case = list(UpperCAmelCase__ ) if len(self.conv_kernel ) != self.num_conv_layers: raise ValueError( 'Configuration for convolutional module is incorrect. ' 'It is required that `len(config.conv_kernel)` == `config.num_conv_layers` ' F"""but is `len(config.conv_kernel) = {len(self.conv_kernel )}`, """ F"""`config.num_conv_layers = {self.num_conv_layers}`.""" )
700
"""simple docstring""" import subprocess import sys from transformers import BertConfig, BertModel, BertTokenizer, pipeline from transformers.testing_utils import TestCasePlus, require_torch class lowerCAmelCase_ ( lowerCAmelCase ): """simple docstring""" @require_torch def snake_case ( self ): """simple docstring""" snake_case = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n ' snake_case = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n ' snake_case = '\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet")\nsocket.socket = offline_socket\n ' # Force fetching the files so that we can use the cache snake_case = 'hf-internal-testing/tiny-random-bert' BertConfig.from_pretrained(lowerCAmelCase ) BertModel.from_pretrained(lowerCAmelCase ) BertTokenizer.from_pretrained(lowerCAmelCase ) pipeline(task='fill-mask' , model=lowerCAmelCase ) # baseline - just load from_pretrained with normal network snake_case = [sys.executable, '-c', '\n'.join([load, run, mock] )] # should succeed snake_case = self.get_env() # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files snake_case = '1' snake_case = subprocess.run(lowerCAmelCase , env=lowerCAmelCase , check=lowerCAmelCase , capture_output=lowerCAmelCase ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn('success' , result.stdout.decode() ) @require_torch def snake_case ( self ): """simple docstring""" snake_case = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n ' snake_case = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n ' snake_case = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")\nsocket.socket = offline_socket\n ' # Force fetching the files so that we can use the cache snake_case = 'hf-internal-testing/tiny-random-bert' BertConfig.from_pretrained(lowerCAmelCase ) BertModel.from_pretrained(lowerCAmelCase ) BertTokenizer.from_pretrained(lowerCAmelCase ) pipeline(task='fill-mask' , model=lowerCAmelCase ) # baseline - just load from_pretrained with normal network snake_case = [sys.executable, '-c', '\n'.join([load, run, mock] )] # should succeed snake_case = self.get_env() snake_case = subprocess.run(lowerCAmelCase , env=lowerCAmelCase , check=lowerCAmelCase , capture_output=lowerCAmelCase ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn('success' , result.stdout.decode() ) @require_torch def snake_case ( self ): """simple docstring""" snake_case = '\nfrom transformers import BertConfig, BertModel, BertTokenizer\n ' snake_case = '\nmname = "hf-internal-testing/tiny-random-bert-sharded"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint("success")\n ' snake_case = '\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")\nsocket.socket = offline_socket\n ' # baseline - just load from_pretrained with normal network snake_case = [sys.executable, '-c', '\n'.join([load, run] )] # should succeed snake_case = self.get_env() snake_case = subprocess.run(lowerCAmelCase , env=lowerCAmelCase , check=lowerCAmelCase , capture_output=lowerCAmelCase ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn('success' , result.stdout.decode() ) # next emulate no network snake_case = [sys.executable, '-c', '\n'.join([load, mock, run] )] # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this. # env["TRANSFORMERS_OFFLINE"] = "0" # result = subprocess.run(cmd, env=env, check=False, capture_output=True) # self.assertEqual(result.returncode, 1, result.stderr) # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files snake_case = '1' snake_case = subprocess.run(lowerCAmelCase , env=lowerCAmelCase , check=lowerCAmelCase , capture_output=lowerCAmelCase ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn('success' , result.stdout.decode() ) @require_torch def snake_case ( self ): """simple docstring""" snake_case = '\nfrom transformers import pipeline\n ' snake_case = '\nmname = "hf-internal-testing/tiny-random-bert"\npipe = pipeline(model=mname)\n ' snake_case = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")\nsocket.socket = offline_socket\n ' snake_case = self.get_env() snake_case = '1' snake_case = [sys.executable, '-c', '\n'.join([load, mock, run] )] snake_case = subprocess.run(lowerCAmelCase , env=lowerCAmelCase , check=lowerCAmelCase , capture_output=lowerCAmelCase ) self.assertEqual(result.returncode , 1 , result.stderr ) self.assertIn( 'You cannot infer task automatically within `pipeline` when using offline mode' , result.stderr.decode().replace('\n' , '' ) , ) @require_torch def snake_case ( self ): """simple docstring""" snake_case = '\nfrom transformers import AutoModel\n ' snake_case = '\nmname = "hf-internal-testing/test_dynamic_model"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint("success")\n ' # baseline - just load from_pretrained with normal network snake_case = [sys.executable, '-c', '\n'.join([load, run] )] # should succeed snake_case = self.get_env() snake_case = subprocess.run(lowerCAmelCase , env=lowerCAmelCase , check=lowerCAmelCase , capture_output=lowerCAmelCase ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn('success' , result.stdout.decode() ) # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files snake_case = '1' snake_case = subprocess.run(lowerCAmelCase , env=lowerCAmelCase , check=lowerCAmelCase , capture_output=lowerCAmelCase ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn('success' , result.stdout.decode() )
104
0
import numpy as np def a__ ( A_ ): '''simple docstring''' return 1 / (1 + np.exp(-vector )) def a__ ( A_ ): '''simple docstring''' return vector * sigmoid(A_ ) if __name__ == "__main__": import doctest doctest.testmod()
529
import string def a__ ( A_ ): '''simple docstring''' __magic_name__ = """""" for i in sequence: __magic_name__ = ord(A_ ) if 65 <= extract <= 90: output += chr(155 - extract ) elif 97 <= extract <= 122: output += chr(219 - extract ) else: output += i return output def a__ ( A_ ): '''simple docstring''' __magic_name__ = string.ascii_letters __magic_name__ = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1] return "".join( letters_reversed[letters.index(A_ )] if c in letters else c for c in sequence ) def a__ ( ): '''simple docstring''' from timeit import timeit print("""Running performance benchmarks...""" ) __magic_name__ = """from string import printable ; from __main__ import atbash, atbash_slow""" print(f'''> atbash_slow(): {timeit('atbash_slow(printable)', setup=A_ )} seconds''' ) print(f'''> atbash(): {timeit('atbash(printable)', setup=A_ )} seconds''' ) if __name__ == "__main__": for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"): print(F'''{example} encrypted in atbash: {atbash(example)}''') benchmark()
529
1
from typing import Any def lowerCamelCase_ ( UpperCamelCase__ : Optional[Any] ) -> list[Any]: """simple docstring""" if not input_list: return [] __lowerCamelCase = [input_list.count(lowerCamelCase_ ) for value in input_list] __lowerCamelCase = max(lowerCamelCase_ ) # Gets the maximum count in the input list. # Gets values of modes return sorted({input_list[i] for i, value in enumerate(lowerCamelCase_ ) if value == y} ) if __name__ == "__main__": import doctest doctest.testmod()
706
import copy from typing import Any, Dict, List, Optional, Union import numpy as np import torch from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import TensorType, logging __A = logging.get_logger(__name__) class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" snake_case_ = ['''input_features''', '''is_longer'''] def __init__( self , lowerCamelCase__=64 , lowerCamelCase__=48_000 , lowerCamelCase__=480 , lowerCamelCase__=10 , lowerCamelCase__=1_024 , lowerCamelCase__=0.0 , lowerCamelCase__=False , lowerCamelCase__ = 0 , lowerCamelCase__ = 14_000 , lowerCamelCase__ = None , lowerCamelCase__ = "fusion" , lowerCamelCase__ = "repeatpad" , **lowerCamelCase__ , ) -> Optional[int]: '''simple docstring''' super().__init__( feature_size=lowerCamelCase__ , sampling_rate=lowerCamelCase__ , padding_value=lowerCamelCase__ , return_attention_mask=lowerCamelCase__ , **lowerCamelCase__ , ) __lowerCamelCase = top_db __lowerCamelCase = truncation __lowerCamelCase = padding __lowerCamelCase = fft_window_size __lowerCamelCase = (fft_window_size >> 1) + 1 __lowerCamelCase = hop_length __lowerCamelCase = max_length_s __lowerCamelCase = max_length_s * sampling_rate __lowerCamelCase = sampling_rate __lowerCamelCase = frequency_min __lowerCamelCase = frequency_max __lowerCamelCase = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins , num_mel_filters=lowerCamelCase__ , min_frequency=lowerCamelCase__ , max_frequency=lowerCamelCase__ , sampling_rate=lowerCamelCase__ , norm=lowerCamelCase__ , mel_scale='htk' , ) __lowerCamelCase = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins , num_mel_filters=lowerCamelCase__ , min_frequency=lowerCamelCase__ , max_frequency=lowerCamelCase__ , sampling_rate=lowerCamelCase__ , norm='slaney' , mel_scale='slaney' , ) def lowercase_ ( self ) -> Dict[str, Any]: '''simple docstring''' __lowerCamelCase = copy.deepcopy(self.__dict__ ) __lowerCamelCase = self.__class__.__name__ if "mel_filters" in output: del output["mel_filters"] if "mel_filters_slaney" in output: del output["mel_filters_slaney"] return output def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ = None ) -> np.ndarray: '''simple docstring''' __lowerCamelCase = spectrogram( lowerCamelCase__ , window_function(self.fft_window_size , 'hann' ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=lowerCamelCase__ , log_mel='dB' , ) return log_mel_spectrogram.T def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> List[str]: '''simple docstring''' __lowerCamelCase = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 ) if len(ranges[1] ) == 0: # if the audio is too short, we just use the first chunk __lowerCamelCase = [0] if len(ranges[2] ) == 0: # if the audio is too short, we just use the first chunk __lowerCamelCase = [0] # randomly choose index for each part __lowerCamelCase = np.random.choice(ranges[0] ) __lowerCamelCase = np.random.choice(ranges[1] ) __lowerCamelCase = np.random.choice(ranges[2] ) __lowerCamelCase = mel[idx_front : idx_front + chunk_frames, :] __lowerCamelCase = mel[idx_middle : idx_middle + chunk_frames, :] __lowerCamelCase = mel[idx_back : idx_back + chunk_frames, :] __lowerCamelCase = torch.tensor(mel[None, None, :] ) __lowerCamelCase = torch.nn.functional.interpolate( lowerCamelCase__ , size=[chunk_frames, 64] , mode='bilinear' , align_corners=lowerCamelCase__ ) __lowerCamelCase = mel_shrink[0][0].numpy() __lowerCamelCase = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 ) return mel_fusion def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> np.array: '''simple docstring''' if waveform.shape[0] > max_length: if truncation == "rand_trunc": __lowerCamelCase = True # random crop to max_length (for compatibility) -> this should be handled by self.pad __lowerCamelCase = len(lowerCamelCase__ ) - max_length __lowerCamelCase = np.random.randint(0 , overflow + 1 ) __lowerCamelCase = waveform[idx : idx + max_length] __lowerCamelCase = self._np_extract_fbank_features(lowerCamelCase__ , self.mel_filters_slaney )[None, :] elif truncation == "fusion": __lowerCamelCase = self._np_extract_fbank_features(lowerCamelCase__ , self.mel_filters ) __lowerCamelCase = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed __lowerCamelCase = mel.shape[0] if chunk_frames == total_frames: # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length. # In this case, we just use the whole audio. __lowerCamelCase = np.stack([mel, mel, mel, mel] , axis=0 ) __lowerCamelCase = False else: __lowerCamelCase = self._random_mel_fusion(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) __lowerCamelCase = True else: raise NotImplementedError(f"""data_truncating {truncation} not implemented""" ) else: __lowerCamelCase = False # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding if waveform.shape[0] < max_length: if padding == "repeat": __lowerCamelCase = int(max_length / len(lowerCamelCase__ ) ) __lowerCamelCase = np.stack(np.tile(lowerCamelCase__ , n_repeat + 1 ) )[:max_length] if padding == "repeatpad": __lowerCamelCase = int(max_length / len(lowerCamelCase__ ) ) __lowerCamelCase = np.stack(np.tile(lowerCamelCase__ , lowerCamelCase__ ) ) __lowerCamelCase = np.pad(lowerCamelCase__ , (0, max_length - waveform.shape[0]) , mode='constant' , constant_values=0 ) if truncation == "fusion": __lowerCamelCase = self._np_extract_fbank_features(lowerCamelCase__ , self.mel_filters ) __lowerCamelCase = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 ) else: __lowerCamelCase = self._np_extract_fbank_features(lowerCamelCase__ , self.mel_filters_slaney )[None, :] return input_mel, longer def __call__( self , lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , **lowerCamelCase__ , ) -> BatchFeature: '''simple docstring''' __lowerCamelCase = truncation if truncation is not None else self.truncation __lowerCamelCase = padding if padding else self.padding if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a""" f""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input""" f""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" ) else: logger.warning( 'It is strongly recommended to pass the `sampling_rate` argument to this function. ' 'Failing to do so can result in silent errors that might be hard to debug.' ) __lowerCamelCase = isinstance(lowerCamelCase__ , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" ) __lowerCamelCase = is_batched_numpy or ( isinstance(lowerCamelCase__ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: __lowerCamelCase = [np.asarray(lowerCamelCase__ , dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(lowerCamelCase__ , np.ndarray ): __lowerCamelCase = np.asarray(lowerCamelCase__ , dtype=np.floataa ) elif isinstance(lowerCamelCase__ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): __lowerCamelCase = raw_speech.astype(np.floataa ) # always return batch if not is_batched: __lowerCamelCase = [np.asarray(lowerCamelCase__ )] # convert to mel spectrogram, truncate and pad if needed. __lowerCamelCase = [ self._get_input_mel(lowerCamelCase__ , max_length if max_length else self.nb_max_samples , lowerCamelCase__ , lowerCamelCase__ ) for waveform in raw_speech ] __lowerCamelCase = [] __lowerCamelCase = [] for mel, longer in padded_inputs: input_mel.append(lowerCamelCase__ ) is_longer.append(lowerCamelCase__ ) if truncation == "fusion" and sum(lowerCamelCase__ ) == 0: # if no audio is longer than 10s, then randomly select one audio to be longer __lowerCamelCase = np.random.randint(0 , len(lowerCamelCase__ ) ) __lowerCamelCase = True if isinstance(input_mel[0] , lowerCamelCase__ ): __lowerCamelCase = [np.asarray(lowerCamelCase__ , dtype=np.floataa ) for feature in input_mel] # is_longer is a list of bool __lowerCamelCase = [[longer] for longer in is_longer] __lowerCamelCase = {'input_features': input_mel, 'is_longer': is_longer} __lowerCamelCase = BatchFeature(lowerCamelCase__ ) if return_tensors is not None: __lowerCamelCase = input_features.convert_to_tensors(lowerCamelCase__ ) return input_features
167
0
"""simple docstring""" import logging import os from dataclasses import dataclass from enum import Enum from typing import List, Optional, Union from filelock import FileLock from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available a : List[Any] = logging.getLogger(__name__) @dataclass class a_ : a : str a : List[str] a : Optional[List[str]] @dataclass class a_ : a : List[int] a : List[int] a : Optional[List[int]] = None a : Optional[List[int]] = None class a_ ( lowerCamelCase__ ): a : Tuple = '''train''' a : int = '''dev''' a : List[Any] = '''test''' class a_ : @staticmethod def _snake_case ( __UpperCamelCase : Optional[int] , __UpperCamelCase : Union[Split, str] ) ->str: '''simple docstring''' raise NotImplementedError @staticmethod def _snake_case ( __UpperCamelCase : str ) ->str: '''simple docstring''' raise NotImplementedError @staticmethod def _snake_case ( __UpperCamelCase : List[InputExample] , __UpperCamelCase : List[str] , __UpperCamelCase : int , __UpperCamelCase : PreTrainedTokenizer , __UpperCamelCase : Dict=False , __UpperCamelCase : Union[str, Any]="[CLS]" , __UpperCamelCase : str=1 , __UpperCamelCase : Any="[SEP]" , __UpperCamelCase : Dict=False , __UpperCamelCase : str=False , __UpperCamelCase : List[Any]=0 , __UpperCamelCase : str=0 , __UpperCamelCase : Dict=-1_00 , __UpperCamelCase : Any=0 , __UpperCamelCase : Optional[int]=True , ) ->str: '''simple docstring''' _UpperCAmelCase = {label: i for i, label in enumerate(lowerCAmelCase__ )} _UpperCAmelCase = [] for ex_index, example in enumerate(lowerCAmelCase__ ): if ex_index % 1_00_00 == 0: logger.info("""Writing example %d of %d""" , lowerCAmelCase__ , len(lowerCAmelCase__ ) ) _UpperCAmelCase = [] _UpperCAmelCase = [] for word, label in zip(example.words , example.labels ): _UpperCAmelCase = tokenizer.tokenize(lowerCAmelCase__ ) # bert-base-multilingual-cased sometimes output "nothing ([]) when calling tokenize with just a space. if len(lowerCAmelCase__ ) > 0: tokens.extend(lowerCAmelCase__ ) # Use the real label id for the first token of the word, and padding ids for the remaining tokens label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(lowerCAmelCase__ ) - 1) ) # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa. _UpperCAmelCase = tokenizer.num_special_tokens_to_add() if len(lowerCAmelCase__ ) > max_seq_length - special_tokens_count: _UpperCAmelCase = tokens[: (max_seq_length - special_tokens_count)] _UpperCAmelCase = label_ids[: (max_seq_length - special_tokens_count)] # The convention in BERT is: # (a) For sequence pairs: # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP] # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1 # (b) For single sequences: # tokens: [CLS] the dog is hairy . [SEP] # type_ids: 0 0 0 0 0 0 0 # # Where "type_ids" are used to indicate whether this is the first # sequence or the second sequence. The embedding vectors for `type=0` and # `type=1` were learned during pre-training and are added to the wordpiece # embedding vector (and position vector). This is not *strictly* necessary # since the [SEP] token unambiguously separates the sequences, but it makes # it easier for the model to learn the concept of sequences. # # For classification tasks, the first vector (corresponding to [CLS]) is # used as the "sentence vector". Note that this only makes sense because # the entire model is fine-tuned. tokens += [sep_token] label_ids += [pad_token_label_id] if sep_token_extra: # roberta uses an extra separator b/w pairs of sentences tokens += [sep_token] label_ids += [pad_token_label_id] _UpperCAmelCase = [sequence_a_segment_id] * len(lowerCAmelCase__ ) if cls_token_at_end: tokens += [cls_token] label_ids += [pad_token_label_id] segment_ids += [cls_token_segment_id] else: _UpperCAmelCase = [cls_token] + tokens _UpperCAmelCase = [pad_token_label_id] + label_ids _UpperCAmelCase = [cls_token_segment_id] + segment_ids _UpperCAmelCase = tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) # The mask has 1 for real tokens and 0 for padding tokens. Only real # tokens are attended to. _UpperCAmelCase = [1 if mask_padding_with_zero else 0] * len(lowerCAmelCase__ ) # Zero-pad up to the sequence length. _UpperCAmelCase = max_seq_length - len(lowerCAmelCase__ ) if pad_on_left: _UpperCAmelCase = ([pad_token] * padding_length) + input_ids _UpperCAmelCase = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask _UpperCAmelCase = ([pad_token_segment_id] * padding_length) + segment_ids _UpperCAmelCase = ([pad_token_label_id] * padding_length) + label_ids else: input_ids += [pad_token] * padding_length input_mask += [0 if mask_padding_with_zero else 1] * padding_length segment_ids += [pad_token_segment_id] * padding_length label_ids += [pad_token_label_id] * padding_length assert len(lowerCAmelCase__ ) == max_seq_length assert len(lowerCAmelCase__ ) == max_seq_length assert len(lowerCAmelCase__ ) == max_seq_length assert len(lowerCAmelCase__ ) == max_seq_length if ex_index < 5: logger.info("""*** Example ***""" ) logger.info("""guid: %s""" , example.guid ) logger.info("""tokens: %s""" , """ """.join([str(lowerCAmelCase__ ) for x in tokens] ) ) logger.info("""input_ids: %s""" , """ """.join([str(lowerCAmelCase__ ) for x in input_ids] ) ) logger.info("""input_mask: %s""" , """ """.join([str(lowerCAmelCase__ ) for x in input_mask] ) ) logger.info("""segment_ids: %s""" , """ """.join([str(lowerCAmelCase__ ) for x in segment_ids] ) ) logger.info("""label_ids: %s""" , """ """.join([str(lowerCAmelCase__ ) for x in label_ids] ) ) if "token_type_ids" not in tokenizer.model_input_names: _UpperCAmelCase = None features.append( InputFeatures( input_ids=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , label_ids=lowerCAmelCase__ ) ) return features if is_torch_available(): import torch from torch import nn from torch.utils.data import Dataset class a_ ( lowerCamelCase__ ): a : List[InputFeatures] a : int = nn.CrossEntropyLoss().ignore_index def __init__( self : Any , __UpperCamelCase : TokenClassificationTask , __UpperCamelCase : str , __UpperCamelCase : PreTrainedTokenizer , __UpperCamelCase : List[str] , __UpperCamelCase : str , __UpperCamelCase : Optional[int] = None , __UpperCamelCase : Optional[Any]=False , __UpperCamelCase : Split = Split.train , ) ->Dict: '''simple docstring''' _UpperCAmelCase = os.path.join( lowerCAmelCase__ , """cached_{}_{}_{}""".format(mode.value , tokenizer.__class__.__name__ , str(lowerCAmelCase__ ) ) , ) # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. _UpperCAmelCase = cached_features_file + """.lock""" with FileLock(lowerCAmelCase__ ): if os.path.exists(lowerCAmelCase__ ) and not overwrite_cache: logger.info(f"""Loading features from cached file {cached_features_file}""" ) _UpperCAmelCase = torch.load(lowerCAmelCase__ ) else: logger.info(f"""Creating features from dataset file at {data_dir}""" ) _UpperCAmelCase = token_classification_task.read_examples_from_file(lowerCAmelCase__ , lowerCAmelCase__ ) # TODO clean up all this to leverage built-in features of tokenizers _UpperCAmelCase = token_classification_task.convert_examples_to_features( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , cls_token_at_end=bool(model_type in ["""xlnet"""] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ["""xlnet"""] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=lowerCAmelCase__ , pad_on_left=bool(tokenizer.padding_side == """left""" ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , ) logger.info(f"""Saving features into cached file {cached_features_file}""" ) torch.save(self.features , lowerCAmelCase__ ) def __len__( self : Optional[int] ) ->Union[str, Any]: '''simple docstring''' return len(self.features ) def __getitem__( self : Optional[int] , __UpperCamelCase : Any ) ->int: '''simple docstring''' return self.features[i] if is_tf_available(): import tensorflow as tf class a_ : a : List[InputFeatures] a : int = -100 def __init__( self : Optional[int] , __UpperCamelCase : TokenClassificationTask , __UpperCamelCase : str , __UpperCamelCase : PreTrainedTokenizer , __UpperCamelCase : List[str] , __UpperCamelCase : str , __UpperCamelCase : Optional[int] = None , __UpperCamelCase : Optional[int]=False , __UpperCamelCase : Split = Split.train , ) ->List[str]: '''simple docstring''' _UpperCAmelCase = token_classification_task.read_examples_from_file(lowerCAmelCase__ , lowerCAmelCase__ ) # TODO clean up all this to leverage built-in features of tokenizers _UpperCAmelCase = token_classification_task.convert_examples_to_features( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , cls_token_at_end=bool(model_type in ["""xlnet"""] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ["""xlnet"""] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=lowerCAmelCase__ , pad_on_left=bool(tokenizer.padding_side == """left""" ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , ) def gen(): for ex in self.features: if ex.token_type_ids is None: yield ( {"input_ids": ex.input_ids, "attention_mask": ex.attention_mask}, ex.label_ids, ) else: yield ( { "input_ids": ex.input_ids, "attention_mask": ex.attention_mask, "token_type_ids": ex.token_type_ids, }, ex.label_ids, ) if "token_type_ids" not in tokenizer.model_input_names: _UpperCAmelCase = tf.data.Dataset.from_generator( lowerCAmelCase__ , ({"""input_ids""": tf.intaa, """attention_mask""": tf.intaa}, tf.intaa) , ( {"""input_ids""": tf.TensorShape([None] ), """attention_mask""": tf.TensorShape([None] )}, tf.TensorShape([None] ), ) , ) else: _UpperCAmelCase = tf.data.Dataset.from_generator( lowerCAmelCase__ , ({"""input_ids""": tf.intaa, """attention_mask""": tf.intaa, """token_type_ids""": tf.intaa}, tf.intaa) , ( { """input_ids""": tf.TensorShape([None] ), """attention_mask""": tf.TensorShape([None] ), """token_type_ids""": tf.TensorShape([None] ), }, tf.TensorShape([None] ), ) , ) def _snake_case ( self : List[str] ) ->str: '''simple docstring''' _UpperCAmelCase = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features ) ) ) return self.dataset def __len__( self : Any ) ->Dict: '''simple docstring''' return len(self.features ) def __getitem__( self : int , __UpperCamelCase : List[str] ) ->Dict: '''simple docstring''' return self.features[i]
555
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available UpperCamelCase__ : int = { '''configuration_poolformer''': [ '''POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PoolFormerConfig''', '''PoolFormerOnnxConfig''', ] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ : List[str] = ['''PoolFormerFeatureExtractor'''] UpperCamelCase__ : Tuple = ['''PoolFormerImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ : str = [ '''POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''PoolFormerForImageClassification''', '''PoolFormerModel''', '''PoolFormerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_poolformer import ( POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, PoolFormerConfig, PoolFormerOnnxConfig, ) try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_poolformer import PoolFormerFeatureExtractor from .image_processing_poolformer import PoolFormerImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_poolformer import ( POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, PoolFormerForImageClassification, PoolFormerModel, PoolFormerPreTrainedModel, ) else: import sys UpperCamelCase__ : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
578
0
'''simple docstring''' import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class a ( snake_case__ ): '''simple docstring''' __lowerCAmelCase : List[str] = ["""image_processor""", """tokenizer"""] __lowerCAmelCase : str = """ChineseCLIPImageProcessor""" __lowerCAmelCase : Union[str, Any] = ("""BertTokenizer""", """BertTokenizerFast""") def __init__( self , lowerCamelCase_=None , lowerCamelCase_=None , **lowerCamelCase_ ) -> List[Any]: _a : Union[str, Any] = None if "feature_extractor" in kwargs: warnings.warn( 'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`' ' instead.' , lowerCamelCase_ , ) _a : List[Any] = kwargs.pop('feature_extractor' ) _a : Any = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('You need to specify an `image_processor`.' ) if tokenizer is None: raise ValueError('You need to specify a `tokenizer`.' ) super().__init__(lowerCamelCase_ , lowerCamelCase_ ) _a : Any = self.image_processor def __call__( self , lowerCamelCase_=None , lowerCamelCase_=None , lowerCamelCase_=None , **lowerCamelCase_ ) -> List[str]: if text is None and images is None: raise ValueError('You have to specify either text or images. Both cannot be none.' ) if text is not None: _a : Dict = self.tokenizer(lowerCamelCase_ , return_tensors=lowerCamelCase_ , **lowerCamelCase_ ) if images is not None: _a : List[Any] = self.image_processor(lowerCamelCase_ , return_tensors=lowerCamelCase_ , **lowerCamelCase_ ) if text is not None and images is not None: _a : Optional[int] = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**lowerCamelCase_ ) , tensor_type=lowerCamelCase_ ) def __UpperCamelCase ( self , *lowerCamelCase_ , **lowerCamelCase_ ) -> Union[str, Any]: return self.tokenizer.batch_decode(*lowerCamelCase_ , **lowerCamelCase_ ) def __UpperCamelCase ( self , *lowerCamelCase_ , **lowerCamelCase_ ) -> Optional[Any]: return self.tokenizer.decode(*lowerCamelCase_ , **lowerCamelCase_ ) @property def __UpperCamelCase ( self ) -> Optional[Any]: _a : int = self.tokenizer.model_input_names _a : int = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) @property def __UpperCamelCase ( self ) -> Any: warnings.warn( '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , lowerCamelCase_ , ) return self.image_processor_class
705
'''simple docstring''' from ...configuration_utils import PretrainedConfig UpperCAmelCase_ : List[Any] = { "google/tapas-base-finetuned-sqa": ( "https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json" ), "google/tapas-base-finetuned-wtq": ( "https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json" ), "google/tapas-base-finetuned-wikisql-supervised": ( "https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json" ), "google/tapas-base-finetuned-tabfact": ( "https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json" ), } class a ( snake_case__ ): '''simple docstring''' __lowerCAmelCase : Tuple = """tapas""" def __init__( self , lowerCamelCase_=3_0_5_2_2 , lowerCamelCase_=7_6_8 , lowerCamelCase_=1_2 , lowerCamelCase_=1_2 , lowerCamelCase_=3_0_7_2 , lowerCamelCase_="gelu" , lowerCamelCase_=0.1 , lowerCamelCase_=0.1 , lowerCamelCase_=1_0_2_4 , lowerCamelCase_=[3, 2_5_6, 2_5_6, 2, 2_5_6, 2_5_6, 1_0] , lowerCamelCase_=0.02 , lowerCamelCase_=1e-12 , lowerCamelCase_=0 , lowerCamelCase_=10.0 , lowerCamelCase_=0 , lowerCamelCase_=1.0 , lowerCamelCase_=None , lowerCamelCase_=1.0 , lowerCamelCase_=False , lowerCamelCase_=None , lowerCamelCase_=1.0 , lowerCamelCase_=1.0 , lowerCamelCase_=False , lowerCamelCase_=False , lowerCamelCase_="ratio" , lowerCamelCase_=None , lowerCamelCase_=None , lowerCamelCase_=6_4 , lowerCamelCase_=3_2 , lowerCamelCase_=False , lowerCamelCase_=True , lowerCamelCase_=False , lowerCamelCase_=False , lowerCamelCase_=True , lowerCamelCase_=False , lowerCamelCase_=None , lowerCamelCase_=None , **lowerCamelCase_ , ) -> Optional[Any]: super().__init__(pad_token_id=lowerCamelCase_ , **lowerCamelCase_ ) # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes) _a : Optional[Any] = vocab_size _a : List[str] = hidden_size _a : Union[str, Any] = num_hidden_layers _a : Tuple = num_attention_heads _a : Tuple = hidden_act _a : Optional[Any] = intermediate_size _a : Dict = hidden_dropout_prob _a : List[Any] = attention_probs_dropout_prob _a : int = max_position_embeddings _a : str = type_vocab_sizes _a : Tuple = initializer_range _a : int = layer_norm_eps # Fine-tuning task hyperparameters _a : Any = positive_label_weight _a : Optional[int] = num_aggregation_labels _a : Any = aggregation_loss_weight _a : str = use_answer_as_supervision _a : Optional[int] = answer_loss_importance _a : int = use_normalized_answer_loss _a : Optional[int] = huber_loss_delta _a : Optional[int] = temperature _a : Union[str, Any] = aggregation_temperature _a : List[str] = use_gumbel_for_cells _a : Optional[Any] = use_gumbel_for_aggregation _a : str = average_approximation_function _a : Tuple = cell_selection_preference _a : Tuple = answer_loss_cutoff _a : Optional[int] = max_num_rows _a : List[Any] = max_num_columns _a : Any = average_logits_per_cell _a : str = select_one_column _a : Any = allow_empty_column_selection _a : Dict = init_cell_selection_weights_to_zero _a : List[Any] = reset_position_index_per_cell _a : Union[str, Any] = disable_per_token_loss # Aggregation hyperparameters _a : Dict = aggregation_labels _a : List[Any] = no_aggregation_label_index if isinstance(self.aggregation_labels , lowerCamelCase_ ): _a : str = {int(lowerCamelCase_ ): v for k, v in aggregation_labels.items()}
424
0
from __future__ import annotations def A(__a: list[int] , __a: list[int] , __a: int ): lowerCAmelCase_ = list(range(len(__a ) ) ) lowerCAmelCase_ = [v / w for v, w in zip(__a , __a )] index.sort(key=lambda __a : ratio[i] , reverse=__a ) lowerCAmelCase_ = 0 lowerCAmelCase_ = [0] * len(__a ) for i in index: if weight[i] <= capacity: lowerCAmelCase_ = 1 max_value += value[i] capacity -= weight[i] else: lowerCAmelCase_ = capacity / weight[i] max_value += value[i] * capacity / weight[i] break return max_value, fractions if __name__ == "__main__": import doctest doctest.testmod()
122
from collections import defaultdict class __magic_name__ : def __init__( self , _a , _a ) -> Tuple: lowerCAmelCase_ = total # total no of tasks (N) # DP table will have a dimension of (2^M)*N # initially all values are set to -1 lowerCAmelCase_ = [ [-1 for i in range(total + 1 )] for j in range(2 ** len(_a ) ) ] lowerCAmelCase_ = defaultdict(_a ) # stores the list of persons for each task # final_mask is used to check if all persons are included by setting all bits # to 1 lowerCAmelCase_ = (1 << len(_a )) - 1 def __a ( self , _a , _a ) -> Optional[Any]: # if mask == self.finalmask all persons are distributed tasks, return 1 if mask == self.final_mask: return 1 # if not everyone gets the task and no more tasks are available, return 0 if task_no > self.total_tasks: return 0 # if case already considered if self.dp[mask][task_no] != -1: return self.dp[mask][task_no] # Number of ways when we don't this task in the arrangement lowerCAmelCase_ = self.count_ways_until(_a , task_no + 1 ) # now assign the tasks one by one to all possible persons and recursively # assign for the remaining tasks. if task_no in self.task: for p in self.task[task_no]: # if p is already given a task if mask & (1 << p): continue # assign this task to p and change the mask value. And recursively # assign tasks with the new mask value. total_ways_util += self.count_ways_until(mask | (1 << p) , task_no + 1 ) # save the value. lowerCAmelCase_ = total_ways_util return self.dp[mask][task_no] def __a ( self , _a ) -> Optional[int]: # Store the list of persons for each task for i in range(len(_a ) ): for j in task_performed[i]: self.task[j].append(_a ) # call the function to fill the DP table, final answer is stored in dp[0][1] return self.count_ways_until(0 , 1 ) if __name__ == "__main__": lowerCamelCase__ = 5 # total no of tasks (the value of N) # the list of tasks that can be done by M persons. lowerCamelCase__ = [[1, 3, 4], [1, 2, 5], [3, 4]] print( AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways( task_performed ) )
122
1
import unittest from parameterized import parameterized from transformers import OpenLlamaConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel class lowerCamelCase_ : def __init__( self , __lowerCAmelCase , __lowerCAmelCase=1_3 , __lowerCAmelCase=7 , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=False , __lowerCAmelCase=True , __lowerCAmelCase=9_9 , __lowerCAmelCase=3_2 , __lowerCAmelCase=5 , __lowerCAmelCase=4 , __lowerCAmelCase=3_7 , __lowerCAmelCase="gelu" , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=5_1_2 , __lowerCAmelCase=1_6 , __lowerCAmelCase=2 , __lowerCAmelCase=0.02 , __lowerCAmelCase=3 , __lowerCAmelCase=4 , __lowerCAmelCase=None , ): """simple docstring""" __magic_name__ :Union[str, Any] = parent __magic_name__ :Tuple = batch_size __magic_name__ :Union[str, Any] = seq_length __magic_name__ :int = is_training __magic_name__ :Tuple = use_input_mask __magic_name__ :Optional[int] = use_token_type_ids __magic_name__ :Union[str, Any] = use_labels __magic_name__ :Dict = vocab_size __magic_name__ :int = hidden_size __magic_name__ :Union[str, Any] = num_hidden_layers __magic_name__ :Any = num_attention_heads __magic_name__ :str = intermediate_size __magic_name__ :List[Any] = hidden_act __magic_name__ :Tuple = hidden_dropout_prob __magic_name__ :Any = attention_probs_dropout_prob __magic_name__ :Tuple = max_position_embeddings __magic_name__ :str = type_vocab_size __magic_name__ :Tuple = type_sequence_label_size __magic_name__ :Tuple = initializer_range __magic_name__ :Any = num_labels __magic_name__ :Tuple = num_choices __magic_name__ :int = scope def A ( self ): """simple docstring""" __magic_name__ :Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __magic_name__ :Optional[Any] = None if self.use_input_mask: __magic_name__ :Tuple = random_attention_mask([self.batch_size, self.seq_length] ) __magic_name__ :Any = None if self.use_token_type_ids: __magic_name__ :List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __magic_name__ :str = None __magic_name__ :Union[str, Any] = None __magic_name__ :Dict = None if self.use_labels: __magic_name__ :List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __magic_name__ :Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __magic_name__ :Optional[int] = ids_tensor([self.batch_size] , self.num_choices ) __magic_name__ :List[Any] = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def A ( self ): """simple docstring""" return OpenLlamaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowerCAmelCase , initializer_range=self.initializer_range , use_stable_embedding=__lowerCAmelCase , ) def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): """simple docstring""" __magic_name__ :Optional[int] = OpenLlamaModel(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() __magic_name__ :Optional[Any] = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase ) __magic_name__ :Optional[int] = model(__lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , ): """simple docstring""" __magic_name__ :str = True __magic_name__ :Any = OpenLlamaModel(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() __magic_name__ :Union[str, Any] = model( __lowerCAmelCase , attention_mask=__lowerCAmelCase , encoder_hidden_states=__lowerCAmelCase , encoder_attention_mask=__lowerCAmelCase , ) __magic_name__ :Optional[Any] = model( __lowerCAmelCase , attention_mask=__lowerCAmelCase , encoder_hidden_states=__lowerCAmelCase , ) __magic_name__ :Optional[int] = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , ): """simple docstring""" __magic_name__ :int = OpenLlamaForCausalLM(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() __magic_name__ :Optional[Any] = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , ): """simple docstring""" __magic_name__ :List[Any] = True __magic_name__ :List[str] = True __magic_name__ :str = OpenLlamaForCausalLM(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() # first forward pass __magic_name__ :Tuple = model( __lowerCAmelCase , attention_mask=__lowerCAmelCase , encoder_hidden_states=__lowerCAmelCase , encoder_attention_mask=__lowerCAmelCase , use_cache=__lowerCAmelCase , ) __magic_name__ :Union[str, Any] = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids __magic_name__ :Union[str, Any] = ids_tensor((self.batch_size, 3) , config.vocab_size ) __magic_name__ :int = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and __magic_name__ :List[Any] = torch.cat([input_ids, next_tokens] , dim=-1 ) __magic_name__ :Union[str, Any] = torch.cat([input_mask, next_mask] , dim=-1 ) __magic_name__ :Union[str, Any] = model( __lowerCAmelCase , attention_mask=__lowerCAmelCase , encoder_hidden_states=__lowerCAmelCase , encoder_attention_mask=__lowerCAmelCase , output_hidden_states=__lowerCAmelCase , )['''hidden_states'''][0] __magic_name__ :Dict = model( __lowerCAmelCase , attention_mask=__lowerCAmelCase , encoder_hidden_states=__lowerCAmelCase , encoder_attention_mask=__lowerCAmelCase , past_key_values=__lowerCAmelCase , output_hidden_states=__lowerCAmelCase , )['''hidden_states'''][0] # select random slice __magic_name__ :str = ids_tensor((1,) , output_from_past.shape[-1] ).item() __magic_name__ :str = output_from_no_past[:, -3:, random_slice_idx].detach() __magic_name__ :Any = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(__lowerCAmelCase , __lowerCAmelCase , atol=1E-3 ) ) def A ( self ): """simple docstring""" __magic_name__ :Optional[int] = self.prepare_config_and_inputs() ( __magic_name__ ) :Optional[int] = config_and_inputs __magic_name__ :Optional[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class lowerCamelCase_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , unittest.TestCase ): a__ = ( (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else () ) a__ = (OpenLlamaForCausalLM,) if is_torch_available() else () a__ = ( { '''feature-extraction''': OpenLlamaModel, '''text-classification''': OpenLlamaForSequenceClassification, '''text-generation''': OpenLlamaForCausalLM, '''zero-shot''': OpenLlamaForSequenceClassification, } if is_torch_available() else {} ) a__ = False a__ = False def A ( self ): """simple docstring""" __magic_name__ :Union[str, Any] = OpenLlamaModelTester(self ) __magic_name__ :List[str] = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=3_7 ) def A ( self ): """simple docstring""" self.config_tester.run_common_tests() def A ( self ): """simple docstring""" __magic_name__ :Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCAmelCase ) def A ( self ): """simple docstring""" __magic_name__ :str = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __magic_name__ :List[str] = type self.model_tester.create_and_check_model(*__lowerCAmelCase ) def A ( self ): """simple docstring""" __magic_name__ :List[Any] = self.model_tester.prepare_config_and_inputs_for_common() __magic_name__ :int = 3 __magic_name__ :Dict = input_dict['''input_ids'''] __magic_name__ :int = input_ids.ne(1 ).to(__lowerCAmelCase ) __magic_name__ :List[Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) __magic_name__ :Any = OpenLlamaForSequenceClassification(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() __magic_name__ :Dict = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , labels=__lowerCAmelCase ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def A ( self ): """simple docstring""" __magic_name__ :List[Any] = self.model_tester.prepare_config_and_inputs_for_common() __magic_name__ :Dict = 3 __magic_name__ :int = '''single_label_classification''' __magic_name__ :Dict = input_dict['''input_ids'''] __magic_name__ :int = input_ids.ne(1 ).to(__lowerCAmelCase ) __magic_name__ :Optional[int] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) __magic_name__ :Union[str, Any] = OpenLlamaForSequenceClassification(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() __magic_name__ :Optional[int] = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , labels=__lowerCAmelCase ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def A ( self ): """simple docstring""" __magic_name__ :Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() __magic_name__ :Any = 3 __magic_name__ :Any = '''multi_label_classification''' __magic_name__ :Optional[int] = input_dict['''input_ids'''] __magic_name__ :Optional[int] = input_ids.ne(1 ).to(__lowerCAmelCase ) __magic_name__ :Dict = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float ) __magic_name__ :Dict = OpenLlamaForSequenceClassification(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() __magic_name__ :Tuple = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , labels=__lowerCAmelCase ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) @unittest.skip('''Open-Llama buffers include complex numbers, which breaks this test''' ) def A ( self ): """simple docstring""" pass @parameterized.expand([('''linear''',), ('''dynamic''',)] ) def A ( self , __lowerCAmelCase ): """simple docstring""" __magic_name__ :List[str] = self.model_tester.prepare_config_and_inputs_for_common() __magic_name__ :Dict = ids_tensor([1, 1_0] , config.vocab_size ) __magic_name__ :Optional[Any] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size ) set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights __magic_name__ :List[str] = OpenLlamaModel(__lowerCAmelCase ) original_model.to(__lowerCAmelCase ) original_model.eval() __magic_name__ :str = original_model(__lowerCAmelCase ).last_hidden_state __magic_name__ :int = original_model(__lowerCAmelCase ).last_hidden_state set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights __magic_name__ :Dict = {'''type''': scaling_type, '''factor''': 10.0} __magic_name__ :str = OpenLlamaModel(__lowerCAmelCase ) scaled_model.to(__lowerCAmelCase ) scaled_model.eval() __magic_name__ :Union[str, Any] = scaled_model(__lowerCAmelCase ).last_hidden_state __magic_name__ :Any = scaled_model(__lowerCAmelCase ).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. if scaling_type == "dynamic": self.assertTrue(torch.allclose(__lowerCAmelCase , __lowerCAmelCase , atol=1E-5 ) ) else: self.assertFalse(torch.allclose(__lowerCAmelCase , __lowerCAmelCase , atol=1E-5 ) ) # The output should be different for long inputs self.assertFalse(torch.allclose(__lowerCAmelCase , __lowerCAmelCase , atol=1E-5 ) )
720
import unittest from .lib import ( Matrix, Vector, axpy, square_zero_matrix, unit_basis_vector, zero_vector, ) class lowerCamelCase_ ( unittest.TestCase ): def A ( self ): """simple docstring""" __magic_name__ :Union[str, Any] = Vector([1, 2, 3] ) self.assertEqual(x.component(0 ) , 1 ) self.assertEqual(x.component(2 ) , 3 ) __magic_name__ :Tuple = Vector() def A ( self ): """simple docstring""" __magic_name__ :Any = Vector([0, 0, 0, 0, 0, 1] ) self.assertEqual(str(__lowerCAmelCase ) , '''(0,0,0,0,0,1)''' ) def A ( self ): """simple docstring""" __magic_name__ :Optional[int] = Vector([1, 2, 3, 4] ) self.assertEqual(len(__lowerCAmelCase ) , 4 ) def A ( self ): """simple docstring""" __magic_name__ :Optional[Any] = Vector([1, 2] ) __magic_name__ :int = Vector([1, 2, 3, 4, 5] ) __magic_name__ :Any = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0] ) __magic_name__ :Optional[Any] = Vector([1, -1, 1, -1, 2, -3, 4, -5] ) self.assertAlmostEqual(x.euclidean_length() , 2.236 , 3 ) self.assertAlmostEqual(y.euclidean_length() , 7.416 , 3 ) self.assertEqual(z.euclidean_length() , 0 ) self.assertAlmostEqual(w.euclidean_length() , 7.616 , 3 ) def A ( self ): """simple docstring""" __magic_name__ :Union[str, Any] = Vector([1, 2, 3] ) __magic_name__ :List[str] = Vector([1, 1, 1] ) self.assertEqual((x + y).component(0 ) , 2 ) self.assertEqual((x + y).component(1 ) , 3 ) self.assertEqual((x + y).component(2 ) , 4 ) def A ( self ): """simple docstring""" __magic_name__ :List[str] = Vector([1, 2, 3] ) __magic_name__ :Union[str, Any] = Vector([1, 1, 1] ) self.assertEqual((x - y).component(0 ) , 0 ) self.assertEqual((x - y).component(1 ) , 1 ) self.assertEqual((x - y).component(2 ) , 2 ) def A ( self ): """simple docstring""" __magic_name__ :int = Vector([1, 2, 3] ) __magic_name__ :Optional[int] = Vector([2, -1, 4] ) # for test of dot product __magic_name__ :List[Any] = Vector([1, -2, -1] ) self.assertEqual(str(x * 3.0 ) , '''(3.0,6.0,9.0)''' ) self.assertEqual((a * b) , 0 ) def A ( self ): """simple docstring""" self.assertEqual(str(zero_vector(1_0 ) ).count('''0''' ) , 1_0 ) def A ( self ): """simple docstring""" self.assertEqual(str(unit_basis_vector(3 , 1 ) ) , '''(0,1,0)''' ) def A ( self ): """simple docstring""" __magic_name__ :Dict = Vector([1, 2, 3] ) __magic_name__ :List[Any] = Vector([1, 0, 1] ) self.assertEqual(str(axpy(2 , __lowerCAmelCase , __lowerCAmelCase ) ) , '''(3,4,7)''' ) def A ( self ): """simple docstring""" __magic_name__ :List[str] = Vector([1, 0, 0, 0, 0, 0] ) __magic_name__ :Optional[int] = x.copy() self.assertEqual(str(__lowerCAmelCase ) , str(__lowerCAmelCase ) ) def A ( self ): """simple docstring""" __magic_name__ :List[str] = Vector([1, 0, 0] ) x.change_component(0 , 0 ) x.change_component(1 , 1 ) self.assertEqual(str(__lowerCAmelCase ) , '''(0,1,0)''' ) def A ( self ): """simple docstring""" __magic_name__ :Any = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) self.assertEqual('''|1,2,3|\n|2,4,5|\n|6,7,8|\n''' , str(__lowerCAmelCase ) ) def A ( self ): """simple docstring""" __magic_name__ :Union[str, Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) __magic_name__ :List[str] = [[-3, -1_4, -1_0], [-5, -1_0, -5], [-2, -1, 0]] for x in range(a.height() ): for y in range(a.width() ): self.assertEqual(minors[x][y] , a.minor(__lowerCAmelCase , __lowerCAmelCase ) ) def A ( self ): """simple docstring""" __magic_name__ :int = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) __magic_name__ :Any = [[-3, 1_4, -1_0], [5, -1_0, 5], [-2, 1, 0]] for x in range(a.height() ): for y in range(a.width() ): self.assertEqual(cofactors[x][y] , a.cofactor(__lowerCAmelCase , __lowerCAmelCase ) ) def A ( self ): """simple docstring""" __magic_name__ :Tuple = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) self.assertEqual(-5 , a.determinant() ) def A ( self ): """simple docstring""" __magic_name__ :str = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3 ) __magic_name__ :Any = Vector([1, 2, 3] ) self.assertEqual('''(14,32,50)''' , str(a * x ) ) self.assertEqual('''|2,4,6|\n|8,10,12|\n|14,16,18|\n''' , str(a * 2 ) ) def A ( self ): """simple docstring""" __magic_name__ :List[Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) a.change_component(0 , 2 , 5 ) self.assertEqual('''|1,2,5|\n|2,4,5|\n|6,7,8|\n''' , str(__lowerCAmelCase ) ) def A ( self ): """simple docstring""" __magic_name__ :List[Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) self.assertEqual(7 , a.component(2 , 1 ) , 0.01 ) def A ( self ): """simple docstring""" __magic_name__ :List[str] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) __magic_name__ :Optional[Any] = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 1_0]] , 3 , 3 ) self.assertEqual('''|2,4,10|\n|4,8,10|\n|12,14,18|\n''' , str(a + b ) ) def A ( self ): """simple docstring""" __magic_name__ :Dict = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) __magic_name__ :Union[str, Any] = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 1_0]] , 3 , 3 ) self.assertEqual('''|0,0,-4|\n|0,0,0|\n|0,0,-2|\n''' , str(a - b ) ) def A ( self ): """simple docstring""" self.assertEqual( '''|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n''' , str(square_zero_matrix(5 ) ) , ) if __name__ == "__main__": unittest.main()
180
0
"""simple docstring""" __magic_name__ : str = { """A""": """.-""", """B""": """-...""", """C""": """-.-.""", """D""": """-..""", """E""": """.""", """F""": """..-.""", """G""": """--.""", """H""": """....""", """I""": """..""", """J""": """.---""", """K""": """-.-""", """L""": """.-..""", """M""": """--""", """N""": """-.""", """O""": """---""", """P""": """.--.""", """Q""": """--.-""", """R""": """.-.""", """S""": """...""", """T""": """-""", """U""": """..-""", """V""": """...-""", """W""": """.--""", """X""": """-..-""", """Y""": """-.--""", """Z""": """--..""", """1""": """.----""", """2""": """..---""", """3""": """...--""", """4""": """....-""", """5""": """.....""", """6""": """-....""", """7""": """--...""", """8""": """---..""", """9""": """----.""", """0""": """-----""", """&""": """.-...""", """@""": """.--.-.""", """:""": """---...""", """,""": """--..--""", """.""": """.-.-.-""", """'""": """.----.""", """\"""": """.-..-.""", """?""": """..--..""", """/""": """-..-.""", """=""": """-...-""", """+""": """.-.-.""", """-""": """-....-""", """(""": """-.--.""", """)""": """-.--.-""", """!""": """-.-.--""", """ """: """/""" } # Exclamation mark is not in ITU-R recommendation # fmt: on __magic_name__ : int = {value: key for key, value in MORSE_CODE_DICT.items()} def UpperCamelCase (SCREAMING_SNAKE_CASE ): return " ".join(MORSE_CODE_DICT[char] for char in message.upper() ) def UpperCamelCase (SCREAMING_SNAKE_CASE ): return "".join(REVERSE_DICT[char] for char in message.split() ) def UpperCamelCase (): UpperCamelCase : Any = """Morse code here!""" print(SCREAMING_SNAKE_CASE ) UpperCamelCase : Optional[int] = encrypt(SCREAMING_SNAKE_CASE ) print(SCREAMING_SNAKE_CASE ) UpperCamelCase : Optional[Any] = decrypt(SCREAMING_SNAKE_CASE ) print(SCREAMING_SNAKE_CASE ) if __name__ == "__main__": main()
102
class _A ( __UpperCamelCase ): pass class _A ( __UpperCamelCase ): pass class _A : def __init__(self ) -> Optional[Any]: '''simple docstring''' UpperCamelCase__ = [ [], [], [], ] def _a (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> None: '''simple docstring''' try: if len(self.queues[priority] ) >= 100: raise OverflowError('''Maximum queue size is 100''' ) self.queues[priority].append(SCREAMING_SNAKE_CASE_ ) except IndexError: raise ValueError('''Valid priorities are 0, 1, and 2''' ) def _a (self ) -> int: '''simple docstring''' for queue in self.queues: if queue: return queue.pop(0 ) raise UnderFlowError('''All queues are empty''' ) def __str__(self ) -> str: '''simple docstring''' return "\n".join(F"Priority {i}: {q}" for i, q in enumerate(self.queues ) ) class _A : def __init__(self ) -> str: '''simple docstring''' UpperCamelCase__ = [] def _a (self , SCREAMING_SNAKE_CASE_ ) -> None: '''simple docstring''' if len(self.queue ) == 100: raise OverFlowError('''Maximum queue size is 100''' ) self.queue.append(SCREAMING_SNAKE_CASE_ ) def _a (self ) -> int: '''simple docstring''' if not self.queue: raise UnderFlowError('''The queue is empty''' ) else: UpperCamelCase__ = min(self.queue ) self.queue.remove(SCREAMING_SNAKE_CASE_ ) return data def __str__(self ) -> str: '''simple docstring''' return str(self.queue ) def __UpperCamelCase ( ): UpperCamelCase__ = FixedPriorityQueue() fpq.enqueue(0 , 10 ) fpq.enqueue(1 , 70 ) fpq.enqueue(0 , 100 ) fpq.enqueue(2 , 1 ) fpq.enqueue(2 , 5 ) fpq.enqueue(1 , 7 ) fpq.enqueue(2 , 4 ) fpq.enqueue(1 , 64 ) fpq.enqueue(0 , 128 ) print(A ) print(fpq.dequeue() ) print(fpq.dequeue() ) print(fpq.dequeue() ) print(fpq.dequeue() ) print(fpq.dequeue() ) print(A ) print(fpq.dequeue() ) print(fpq.dequeue() ) print(fpq.dequeue() ) print(fpq.dequeue() ) print(fpq.dequeue() ) def __UpperCamelCase ( ): UpperCamelCase__ = ElementPriorityQueue() epq.enqueue(10 ) epq.enqueue(70 ) epq.enqueue(100 ) epq.enqueue(1 ) epq.enqueue(5 ) epq.enqueue(7 ) epq.enqueue(4 ) epq.enqueue(64 ) epq.enqueue(128 ) print(A ) print(epq.dequeue() ) print(epq.dequeue() ) print(epq.dequeue() ) print(epq.dequeue() ) print(epq.dequeue() ) print(A ) print(epq.dequeue() ) print(epq.dequeue() ) print(epq.dequeue() ) print(epq.dequeue() ) print(epq.dequeue() ) if __name__ == "__main__": fixed_priority_queue() element_priority_queue()
415
0
from __future__ import annotations from typing import Generic, TypeVar _lowerCamelCase = TypeVar('''T''') class UpperCAmelCase__ ( Generic[T] ): '''simple docstring''' def __init__( self , _lowerCAmelCase ): a =data a =self a =0 class UpperCAmelCase__ ( Generic[T] ): '''simple docstring''' def __init__( self ): a ={} def lowerCAmelCase__ ( self , _lowerCAmelCase ): a =DisjointSetTreeNode(_lowerCAmelCase ) def lowerCAmelCase__ ( self , _lowerCAmelCase ): a =self.map[data] if elem_ref != elem_ref.parent: a =self.find_set(elem_ref.parent.data ) return elem_ref.parent def lowerCAmelCase__ ( self , _lowerCAmelCase , _lowerCAmelCase ): if nodea.rank > nodea.rank: a =nodea else: a =nodea if nodea.rank == nodea.rank: nodea.rank += 1 def lowerCAmelCase__ ( self , _lowerCAmelCase , _lowerCAmelCase ): self.link(self.find_set(_lowerCAmelCase ) , self.find_set(_lowerCAmelCase ) ) class UpperCAmelCase__ ( Generic[T] ): '''simple docstring''' def __init__( self ): a ={} def lowerCAmelCase__ ( self , _lowerCAmelCase ): if node not in self.connections: a ={} def lowerCAmelCase__ ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): self.add_node(_lowerCAmelCase ) self.add_node(_lowerCAmelCase ) a =weight a =weight def lowerCAmelCase__ ( self ): a =[] a =set() for start in self.connections: for end in self.connections[start]: if (start, end) not in seen: seen.add((end, start) ) edges.append((start, end, self.connections[start][end]) ) edges.sort(key=lambda _lowerCAmelCase : x[2] ) # creating the disjoint set a =DisjointSetTree[T]() for node in self.connections: disjoint_set.make_set(_lowerCAmelCase ) # MST generation a =0 a =0 a =GraphUndirectedWeighted[T]() while num_edges < len(self.connections ) - 1: a , a , a =edges[index] index += 1 a =disjoint_set.find_set(_lowerCAmelCase ) a =disjoint_set.find_set(_lowerCAmelCase ) if parent_u != parent_v: num_edges += 1 graph.add_edge(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) disjoint_set.union(_lowerCAmelCase , _lowerCAmelCase ) return graph
721
_lowerCamelCase = '''ABCDEFGHIJKLMNOPQRSTUVWXYZ''' def lowerCamelCase ( )-> None: """simple docstring""" a =input("""Enter message: """ ) a =input("""Enter key [alphanumeric]: """ ) a =input("""Encrypt/Decrypt [e/d]: """ ) if mode.lower().startswith("""e""" ): a ="""encrypt""" a =encrypt_message(UpperCAmelCase_ , UpperCAmelCase_ ) elif mode.lower().startswith("""d""" ): a ="""decrypt""" a =decrypt_message(UpperCAmelCase_ , UpperCAmelCase_ ) print(F'''\n{mode.title()}ed message:''' ) print(UpperCAmelCase_ ) def lowerCamelCase ( UpperCAmelCase_ : str , UpperCAmelCase_ : str )-> str: """simple docstring""" return translate_message(UpperCAmelCase_ , UpperCAmelCase_ , """encrypt""" ) def lowerCamelCase ( UpperCAmelCase_ : str , UpperCAmelCase_ : str )-> str: """simple docstring""" return translate_message(UpperCAmelCase_ , UpperCAmelCase_ , """decrypt""" ) def lowerCamelCase ( UpperCAmelCase_ : str , UpperCAmelCase_ : str , UpperCAmelCase_ : str )-> str: """simple docstring""" a =[] a =0 a =key.upper() for symbol in message: a =LETTERS.find(symbol.upper() ) if num != -1: if mode == "encrypt": num += LETTERS.find(key[key_index] ) elif mode == "decrypt": num -= LETTERS.find(key[key_index] ) num %= len(UpperCAmelCase_ ) if symbol.isupper(): translated.append(LETTERS[num] ) elif symbol.islower(): translated.append(LETTERS[num].lower() ) key_index += 1 if key_index == len(UpperCAmelCase_ ): a =0 else: translated.append(UpperCAmelCase_ ) return "".join(UpperCAmelCase_ ) if __name__ == "__main__": main()
321
0
from ..utils import ( OptionalDependencyNotAvailable, is_flax_available, is_scipy_available, is_torch_available, is_torchsde_available, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_pt_objects import * # noqa F403 else: from .scheduling_consistency_models import CMStochasticIterativeScheduler from .scheduling_ddim import DDIMScheduler from .scheduling_ddim_inverse import DDIMInverseScheduler from .scheduling_ddim_parallel import DDIMParallelScheduler from .scheduling_ddpm import DDPMScheduler from .scheduling_ddpm_parallel import DDPMParallelScheduler from .scheduling_deis_multistep import DEISMultistepScheduler from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler from .scheduling_euler_discrete import EulerDiscreteScheduler from .scheduling_heun_discrete import HeunDiscreteScheduler from .scheduling_ipndm import IPNDMScheduler from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler from .scheduling_karras_ve import KarrasVeScheduler from .scheduling_pndm import PNDMScheduler from .scheduling_repaint import RePaintScheduler from .scheduling_sde_ve import ScoreSdeVeScheduler from .scheduling_sde_vp import ScoreSdeVpScheduler from .scheduling_unclip import UnCLIPScheduler from .scheduling_unipc_multistep import UniPCMultistepScheduler from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin from .scheduling_vq_diffusion import VQDiffusionScheduler try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_flax_objects import * # noqa F403 else: from .scheduling_ddim_flax import FlaxDDIMScheduler from .scheduling_ddpm_flax import FlaxDDPMScheduler from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler from .scheduling_pndm_flax import FlaxPNDMScheduler from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler from .scheduling_utils_flax import ( FlaxKarrasDiffusionSchedulers, FlaxSchedulerMixin, FlaxSchedulerOutput, broadcast_to_shape_from_left, ) try: if not (is_torch_available() and is_scipy_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_torch_and_scipy_objects import * # noqa F403 else: from .scheduling_lms_discrete import LMSDiscreteScheduler try: if not (is_torch_available() and is_torchsde_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403 else: from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
458
'''simple docstring''' from math import sqrt def _SCREAMING_SNAKE_CASE (A = 1_000_000 ) -> int: """simple docstring""" lowercase__ = 0 lowercase__ = 0 lowercase__ = 42 while num_cuboids <= limit: max_cuboid_size += 1 for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ): if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer(): num_cuboids += ( min(A , sum_shortest_sides // 2 ) - max(1 , sum_shortest_sides - max_cuboid_size ) + 1 ) return max_cuboid_size if __name__ == "__main__": print(f"""{solution() = }""")
460
0
import math def lowerCamelCase__ ( _lowercase ): '''simple docstring''' if not isinstance(_lowercase , _lowercase ): UpperCAmelCase_ : Optional[Any] = f'''Input value of [number={number}] must be an integer''' raise TypeError(_lowercase ) if number < 1: UpperCAmelCase_ : List[str] = f'''Input value of [number={number}] must be > 0''' raise ValueError(_lowercase ) elif number == 1: return 3 elif number == 2: return 5 else: UpperCAmelCase_ : Any = int(math.log(number // 3 , 2 ) ) + 2 UpperCAmelCase_ : Any = [3, 5] UpperCAmelCase_ : str = 2 UpperCAmelCase_ : List[str] = 3 for block in range(1 , _lowercase ): for _ in range(_lowercase ): proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1] ) proth_index += 1 increment *= 2 return proth_list[number - 1] if __name__ == "__main__": import doctest doctest.testmod() for number in range(11): __a = 0 try: __a = proth(number) except ValueError: print(F"""ValueError: there is no {number}th Proth number""") continue print(F"""The {number}th Proth number: {value}""")
300
from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import numpy as np import tensorflow as tf from transformers import ( TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST, FlaubertConfig, TFFlaubertForMultipleChoice, TFFlaubertForQuestionAnsweringSimple, TFFlaubertForSequenceClassification, TFFlaubertForTokenClassification, TFFlaubertModel, TFFlaubertWithLMHeadModel, ) class __a: """simple docstring""" def __init__( self ,_SCREAMING_SNAKE_CASE ,) -> Tuple: UpperCAmelCase_ : Dict = parent UpperCAmelCase_ : Optional[Any] = 13 UpperCAmelCase_ : Optional[Any] = 7 UpperCAmelCase_ : Union[str, Any] = True UpperCAmelCase_ : Optional[Any] = True UpperCAmelCase_ : str = True UpperCAmelCase_ : Tuple = True UpperCAmelCase_ : str = True UpperCAmelCase_ : List[Any] = False UpperCAmelCase_ : Dict = False UpperCAmelCase_ : Tuple = False UpperCAmelCase_ : Dict = 2 UpperCAmelCase_ : Tuple = 99 UpperCAmelCase_ : Dict = 0 UpperCAmelCase_ : Optional[int] = 32 UpperCAmelCase_ : Optional[int] = 2 UpperCAmelCase_ : Tuple = 4 UpperCAmelCase_ : List[Any] = 0.1 UpperCAmelCase_ : int = 0.1 UpperCAmelCase_ : List[str] = 512 UpperCAmelCase_ : Any = 16 UpperCAmelCase_ : Union[str, Any] = 2 UpperCAmelCase_ : Any = 0.02 UpperCAmelCase_ : Tuple = 3 UpperCAmelCase_ : List[Any] = 4 UpperCAmelCase_ : Dict = '''last''' UpperCAmelCase_ : Dict = True UpperCAmelCase_ : Tuple = None UpperCAmelCase_ : Union[str, Any] = 0 def a__ ( self ) -> List[str]: UpperCAmelCase_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) UpperCAmelCase_ : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] ,dtype=tf.floataa ) UpperCAmelCase_ : Optional[Any] = None if self.use_input_lengths: UpperCAmelCase_ : Optional[int] = ( ids_tensor([self.batch_size] ,vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length UpperCAmelCase_ : List[str] = None if self.use_token_type_ids: UpperCAmelCase_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.n_langs ) UpperCAmelCase_ : str = None UpperCAmelCase_ : Tuple = None UpperCAmelCase_ : Any = None if self.use_labels: UpperCAmelCase_ : int = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels ) UpperCAmelCase_ : Any = ids_tensor([self.batch_size] ,2 ,dtype=tf.floataa ) UpperCAmelCase_ : List[Any] = ids_tensor([self.batch_size] ,self.num_choices ) UpperCAmelCase_ : int = FlaubertConfig( vocab_size=self.vocab_size ,n_special=self.n_special ,emb_dim=self.hidden_size ,n_layers=self.num_hidden_layers ,n_heads=self.num_attention_heads ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,gelu_activation=self.gelu_activation ,sinusoidal_embeddings=self.sinusoidal_embeddings ,asm=self.asm ,causal=self.causal ,n_langs=self.n_langs ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,summary_type=self.summary_type ,use_proj=self.use_proj ,bos_token_id=self.bos_token_id ,) return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,) -> Any: UpperCAmelCase_ : Tuple = TFFlaubertModel(config=_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Optional[Any] = {'''input_ids''': input_ids, '''lengths''': input_lengths, '''langs''': token_type_ids} UpperCAmelCase_ : List[Any] = model(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Union[str, Any] = [input_ids, input_mask] UpperCAmelCase_ : Union[str, Any] = model(_SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,) -> str: UpperCAmelCase_ : int = TFFlaubertWithLMHeadModel(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Union[str, Any] = {'''input_ids''': input_ids, '''lengths''': input_lengths, '''langs''': token_type_ids} UpperCAmelCase_ : Optional[Any] = model(_SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,) -> Tuple: UpperCAmelCase_ : List[Any] = TFFlaubertForQuestionAnsweringSimple(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Tuple = {'''input_ids''': input_ids, '''lengths''': input_lengths} UpperCAmelCase_ : Optional[int] = model(_SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) ) def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,) -> int: UpperCAmelCase_ : List[Any] = TFFlaubertForSequenceClassification(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Any = {'''input_ids''': input_ids, '''lengths''': input_lengths} UpperCAmelCase_ : str = model(_SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) ) def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,) -> Optional[Any]: UpperCAmelCase_ : str = self.num_labels UpperCAmelCase_ : List[str] = TFFlaubertForTokenClassification(config=_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Optional[int] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} UpperCAmelCase_ : List[str] = model(_SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) ) def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,) -> str: UpperCAmelCase_ : List[Any] = self.num_choices UpperCAmelCase_ : Any = TFFlaubertForMultipleChoice(config=_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : List[str] = tf.tile(tf.expand_dims(_SCREAMING_SNAKE_CASE ,1 ) ,(1, self.num_choices, 1) ) UpperCAmelCase_ : Union[str, Any] = tf.tile(tf.expand_dims(_SCREAMING_SNAKE_CASE ,1 ) ,(1, self.num_choices, 1) ) UpperCAmelCase_ : str = tf.tile(tf.expand_dims(_SCREAMING_SNAKE_CASE ,1 ) ,(1, self.num_choices, 1) ) UpperCAmelCase_ : Dict = { '''input_ids''': multiple_choice_inputs_ids, '''attention_mask''': multiple_choice_input_mask, '''token_type_ids''': multiple_choice_token_type_ids, } UpperCAmelCase_ : str = model(_SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) ) def a__ ( self ) -> List[Any]: UpperCAmelCase_ : List[Any] = self.prepare_config_and_inputs() ( ( UpperCAmelCase_ ), ( UpperCAmelCase_ ), ( UpperCAmelCase_ ), ( UpperCAmelCase_ ), ( UpperCAmelCase_ ), ( UpperCAmelCase_ ), ( UpperCAmelCase_ ), ( UpperCAmelCase_ ), ( UpperCAmelCase_ ), ) : Any = config_and_inputs UpperCAmelCase_ : Tuple = { '''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''langs''': token_type_ids, '''lengths''': input_lengths, } return config, inputs_dict @require_tf class __a( _a , _a , unittest.TestCase ): """simple docstring""" lowerCAmelCase = ( ( TFFlaubertModel, TFFlaubertWithLMHeadModel, TFFlaubertForSequenceClassification, TFFlaubertForQuestionAnsweringSimple, TFFlaubertForTokenClassification, TFFlaubertForMultipleChoice, ) if is_tf_available() else () ) lowerCAmelCase = ( (TFFlaubertWithLMHeadModel,) if is_tf_available() else () ) # TODO (PVP): Check other models whether language generation is also applicable lowerCAmelCase = ( { '''feature-extraction''': TFFlaubertModel, '''fill-mask''': TFFlaubertWithLMHeadModel, '''question-answering''': TFFlaubertForQuestionAnsweringSimple, '''text-classification''': TFFlaubertForSequenceClassification, '''token-classification''': TFFlaubertForTokenClassification, '''zero-shot''': TFFlaubertForSequenceClassification, } if is_tf_available() else {} ) lowerCAmelCase = False lowerCAmelCase = False def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Union[str, Any]: if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith('''Fast''' ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def a__ ( self ) -> Any: UpperCAmelCase_ : Optional[int] = TFFlaubertModelTester(self ) UpperCAmelCase_ : Union[str, Any] = ConfigTester(self ,config_class=_SCREAMING_SNAKE_CASE ,emb_dim=37 ) def a__ ( self ) -> Union[str, Any]: self.config_tester.run_common_tests() def a__ ( self ) -> Tuple: UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_model(*_SCREAMING_SNAKE_CASE ) def a__ ( self ) -> Optional[int]: UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_lm_head(*_SCREAMING_SNAKE_CASE ) def a__ ( self ) -> Optional[int]: UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_qa(*_SCREAMING_SNAKE_CASE ) def a__ ( self ) -> str: UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_sequence_classif(*_SCREAMING_SNAKE_CASE ) def a__ ( self ) -> Tuple: UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_for_token_classification(*_SCREAMING_SNAKE_CASE ) def a__ ( self ) -> Optional[int]: UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_for_multiple_choice(*_SCREAMING_SNAKE_CASE ) @slow def a__ ( self ) -> Any: for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase_ : Any = TFFlaubertModel.from_pretrained(_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) @require_tf @require_sentencepiece @require_tokenizers class __a( unittest.TestCase ): """simple docstring""" @slow def a__ ( self ) -> int: UpperCAmelCase_ : Optional[Any] = TFFlaubertModel.from_pretrained('''jplu/tf-flaubert-small-cased''' ) UpperCAmelCase_ : Dict = tf.convert_to_tensor( [[0, 158, 735, 2_592, 1_424, 6_727, 82, 1]] ,dtype=tf.intaa ,) # "J'aime flaubert !" UpperCAmelCase_ : str = model(_SCREAMING_SNAKE_CASE )[0] UpperCAmelCase_ : Optional[int] = tf.TensorShape((1, 8, 512) ) self.assertEqual(output.shape ,_SCREAMING_SNAKE_CASE ) # compare the actual values for a slice. UpperCAmelCase_ : List[Any] = tf.convert_to_tensor( [ [ [-1.8_76_87_73, -1.56_65_55, 0.27_07_24_18], [-1.6_92_00_38, -0.5_87_35_05, 1.9_32_95_99], [-2.9_56_39_85, -1.6_99_38_35, 1.7_97_20_52], ] ] ,dtype=tf.floataa ,) self.assertTrue(np.allclose(output[:, :3, :3].numpy() ,expected_slice.numpy() ,atol=1e-4 ) )
300
1
def _snake_case ( __snake_case , __snake_case ): while a != 0: _UpperCamelCase , _UpperCamelCase = b % a, a return b def _snake_case ( __snake_case , __snake_case ): if gcd(__snake_case , __snake_case ) != 1: _UpperCamelCase = f"""mod inverse of {a!r} and {m!r} does not exist""" raise ValueError(__snake_case ) _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = 1, 0, a _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = 0, 1, m while va != 0: _UpperCamelCase = ua // va _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = (ua - q * va), (ua - q * va), (ua - q * va), va, va, va return ua % m
10
'''simple docstring''' from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging a__ : str = logging.get_logger(__name__) a__ : Any = { '''xlm-mlm-en-2048''': '''https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json''', '''xlm-mlm-ende-1024''': '''https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json''', '''xlm-mlm-enfr-1024''': '''https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json''', '''xlm-mlm-enro-1024''': '''https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json''', '''xlm-mlm-tlm-xnli15-1024''': '''https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json''', '''xlm-mlm-xnli15-1024''': '''https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json''', '''xlm-clm-enfr-1024''': '''https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json''', '''xlm-clm-ende-1024''': '''https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json''', '''xlm-mlm-17-1280''': '''https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json''', '''xlm-mlm-100-1280''': '''https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json''', } class __snake_case ( __magic_name__ ): __lowerCAmelCase = '''xlm''' __lowerCAmelCase = { '''hidden_size''': '''emb_dim''', '''num_attention_heads''': '''n_heads''', '''num_hidden_layers''': '''n_layers''', '''n_words''': '''vocab_size''', # For backward compatibility } def __init__( self , UpperCamelCase_=3_0145 , UpperCamelCase_=2048 , UpperCamelCase_=12 , UpperCamelCase_=16 , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=True , UpperCamelCase_=False , UpperCamelCase_=False , UpperCamelCase_=False , UpperCamelCase_=1 , UpperCamelCase_=True , UpperCamelCase_=512 , UpperCamelCase_=2048**-0.5 , UpperCamelCase_=1E-1_2 , UpperCamelCase_=0.0_2 , UpperCamelCase_=0 , UpperCamelCase_=1 , UpperCamelCase_=2 , UpperCamelCase_=3 , UpperCamelCase_=5 , UpperCamelCase_=True , UpperCamelCase_="first" , UpperCamelCase_=True , UpperCamelCase_=None , UpperCamelCase_=True , UpperCamelCase_=0.1 , UpperCamelCase_=5 , UpperCamelCase_=5 , UpperCamelCase_=0 , UpperCamelCase_=0 , UpperCamelCase_=2 , UpperCamelCase_=0 , **UpperCamelCase_ , ) -> List[str]: snake_case__ = vocab_size snake_case__ = emb_dim snake_case__ = n_layers snake_case__ = n_heads snake_case__ = dropout snake_case__ = attention_dropout snake_case__ = gelu_activation snake_case__ = sinusoidal_embeddings snake_case__ = causal snake_case__ = asm snake_case__ = n_langs snake_case__ = use_lang_emb snake_case__ = layer_norm_eps snake_case__ = bos_index snake_case__ = eos_index snake_case__ = pad_index snake_case__ = unk_index snake_case__ = mask_index snake_case__ = is_encoder snake_case__ = max_position_embeddings snake_case__ = embed_init_std snake_case__ = init_std snake_case__ = summary_type snake_case__ = summary_use_proj snake_case__ = summary_activation snake_case__ = summary_proj_to_labels snake_case__ = summary_first_dropout snake_case__ = start_n_top snake_case__ = end_n_top snake_case__ = mask_token_id snake_case__ = lang_id if "n_words" in kwargs: snake_case__ = kwargs['n_words'] super().__init__(pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , **UpperCamelCase_ ) class __snake_case ( __magic_name__ ): @property def _snake_case ( self ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": snake_case__ = {0: 'batch', 1: 'choice', 2: 'sequence'} else: snake_case__ = {0: 'batch', 1: 'sequence'} return OrderedDict( [ ('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('token_type_ids', dynamic_axis), ] )
368
0
"""simple docstring""" from __future__ import annotations def lowercase ( __UpperCamelCase ) -> bool: __magic_name__ = str(__UpperCamelCase ) return n == n[::-1] def lowercase ( __UpperCamelCase = 1000000 ) -> Any: __magic_name__ = 0 for i in range(1 , __UpperCamelCase ): if is_palindrome(__UpperCamelCase ) and is_palindrome(bin(__UpperCamelCase ).split('''b''' )[1] ): total += i return total if __name__ == "__main__": print(solution(int(str(input().strip()))))
190
"""simple docstring""" import unittest from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin __lowerCamelCase = get_tests_dir("fixtures/test_sentencepiece.model") @require_sentencepiece @require_tokenizers class _lowercase ( __UpperCAmelCase , unittest.TestCase ): _lowerCamelCase = ReformerTokenizer _lowerCamelCase = ReformerTokenizerFast _lowerCamelCase = True _lowerCamelCase = False _lowerCamelCase = True def lowerCAmelCase__ ( self ): super().setUp() __magic_name__ = ReformerTokenizer(UpperCamelCase_ , keep_accents=UpperCamelCase_ ) tokenizer.save_pretrained(self.tmpdirname ) def lowerCAmelCase__ ( self ): __magic_name__ = '''<s>''' __magic_name__ = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase_ ) , UpperCamelCase_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase_ ) , UpperCamelCase_ ) def lowerCAmelCase__ ( self ): __magic_name__ = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<unk>''' ) self.assertEqual(vocab_keys[1] , '''<s>''' ) self.assertEqual(vocab_keys[-1] , '''j''' ) self.assertEqual(len(UpperCamelCase_ ) , 1000 ) def lowerCAmelCase__ ( self ): self.assertEqual(self.get_tokenizer().vocab_size , 1000 ) def lowerCAmelCase__ ( self ): if not self.test_rust_tokenizer: return __magic_name__ = self.get_tokenizer() __magic_name__ = self.get_rust_tokenizer() __magic_name__ = '''I was born in 92000, and this is falsé.''' __magic_name__ = tokenizer.tokenize(UpperCamelCase_ ) __magic_name__ = rust_tokenizer.tokenize(UpperCamelCase_ ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) __magic_name__ = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) __magic_name__ = rust_tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) __magic_name__ = self.get_rust_tokenizer() __magic_name__ = tokenizer.encode(UpperCamelCase_ ) __magic_name__ = rust_tokenizer.encode(UpperCamelCase_ ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) def lowerCAmelCase__ ( self , UpperCamelCase_=15 ): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): __magic_name__ = self.rust_tokenizer_class.from_pretrained(UpperCamelCase_ , **UpperCamelCase_ ) # Simple input __magic_name__ = '''This is a simple input''' __magic_name__ = ['''This is a simple input 1''', '''This is a simple input 2'''] __magic_name__ = ('''This is a simple input''', '''This is a pair''') __magic_name__ = [ ('''This is a simple input 1''', '''This is a simple input 2'''), ('''This is a simple pair 1''', '''This is a simple pair 2'''), ] # Simple input tests self.assertRaises(UpperCamelCase_ , tokenizer_r.encode , UpperCamelCase_ , max_length=UpperCamelCase_ , padding='''max_length''' ) # Simple input self.assertRaises(UpperCamelCase_ , tokenizer_r.encode_plus , UpperCamelCase_ , max_length=UpperCamelCase_ , padding='''max_length''' ) # Simple input self.assertRaises( UpperCamelCase_ , tokenizer_r.batch_encode_plus , UpperCamelCase_ , max_length=UpperCamelCase_ , padding='''max_length''' , ) # Pair input self.assertRaises(UpperCamelCase_ , tokenizer_r.encode , UpperCamelCase_ , max_length=UpperCamelCase_ , padding='''max_length''' ) # Pair input self.assertRaises(UpperCamelCase_ , tokenizer_r.encode_plus , UpperCamelCase_ , max_length=UpperCamelCase_ , padding='''max_length''' ) # Pair input self.assertRaises( UpperCamelCase_ , tokenizer_r.batch_encode_plus , UpperCamelCase_ , max_length=UpperCamelCase_ , padding='''max_length''' , ) def lowerCAmelCase__ ( self ): pass def lowerCAmelCase__ ( self ): __magic_name__ = ReformerTokenizer(UpperCamelCase_ , keep_accents=UpperCamelCase_ ) __magic_name__ = tokenizer.tokenize('''This is a test''' ) self.assertListEqual(UpperCamelCase_ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(UpperCamelCase_ ) , [285, 46, 10, 170, 382] , ) __magic_name__ = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( UpperCamelCase_ , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.''', ] , ) __magic_name__ = tokenizer.convert_tokens_to_ids(UpperCamelCase_ ) self.assertListEqual( UpperCamelCase_ , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , ) __magic_name__ = tokenizer.convert_ids_to_tokens(UpperCamelCase_ ) self.assertListEqual( UpperCamelCase_ , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.''', ] , ) @cached_property def lowerCAmelCase__ ( self ): return ReformerTokenizer.from_pretrained('''google/reformer-crime-and-punishment''' ) @slow def lowerCAmelCase__ ( self ): __magic_name__ = '''Hello World!''' __magic_name__ = [126, 32, 262, 152, 38, 72, 287] self.assertListEqual(UpperCamelCase_ , self.big_tokenizer.encode(UpperCamelCase_ ) ) @slow def lowerCAmelCase__ ( self ): __magic_name__ = ( '''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will''' ''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth''' ) __magic_name__ = [ 108, 265, 24, 111, 4, 258, 156, 35, 28, 275, 3, 259, 297, 260, 84, 4, 35, 110, 44, 8, 259, 91, 268, 21, 11, 209, 274, 109, 266, 277, 117, 86, 93, 315, 258, 278, 258, 277, 258, 0, 258, 288, 258, 319, 258, 0, 258, 0, 258, 0, 258, 0, 258, 287, 258, 315, 258, 289, 258, 278, 99, 269, 266, 262, 8, 259, 241, 4, 217, 230, 268, 266, 55, 168, 106, 75, 193, 266, 223, 27, 49, 26, 282, 25, 264, 299, 19, 26, 0, 258, 277, 117, 86, 93, 176, 183, 270, 11, 262, 42, 61, 265, ] self.assertListEqual(UpperCamelCase_ , self.big_tokenizer.encode(UpperCamelCase_ ) ) @require_torch @slow def lowerCAmelCase__ ( self ): import torch from transformers import ReformerConfig, ReformerModel # Build sequence __magic_name__ = list(self.big_tokenizer.get_vocab().keys() )[:10] __magic_name__ = ''' '''.join(UpperCamelCase_ ) __magic_name__ = self.big_tokenizer.encode_plus(UpperCamelCase_ , return_tensors='''pt''' ) __magic_name__ = self.big_tokenizer.batch_encode_plus([sequence, sequence] , return_tensors='''pt''' ) __magic_name__ = ReformerConfig() # The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024) __magic_name__ = encoded_sequence['''input_ids'''].shape __magic_name__ = ReformerModel(UpperCamelCase_ ) # Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320) assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size with torch.no_grad(): model(**UpperCamelCase_ ) model(**UpperCamelCase_ ) @slow def lowerCAmelCase__ ( self ): # fmt: off __magic_name__ = {'''input_ids''': [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on # This tokenizer does not know some characters like ")". # That is the reason why we use very simple texts here. # Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064 __magic_name__ = [ '''This is a very simple sentence.''', '''The quick brown fox jumps over the lazy dog.''', ] self.tokenizer_integration_test_util( expected_encoding=UpperCamelCase_ , model_name='''google/reformer-crime-and-punishment''' , revision='''0e6c3decb8211d49bf881013425dc8b0448b3f5a''' , padding=UpperCamelCase_ , sequences=UpperCamelCase_ , )
190
1
'''simple docstring''' from typing import List, Optional, Tuple, Union import torch from ...utils import logging, randn_tensor from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline UpperCamelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name class lowerCamelCase_ ( __a ): def __init__( self : Dict , _A : List[str] , _A : int ): '''simple docstring''' super().__init__() self.register_modules(unet=_A , scheduler=_A ) @torch.no_grad() def __call__( self : List[Any] , _A : int = 1 , _A : int = 100 , _A : Optional[Union[torch.Generator, List[torch.Generator]]] = None , _A : Optional[float] = None , _A : bool = True , ): '''simple docstring''' if audio_length_in_s is None: UpperCAmelCase__ : List[str] = self.unet.config.sample_size / self.unet.config.sample_rate UpperCAmelCase__ : Union[str, Any] = audio_length_in_s * self.unet.config.sample_rate UpperCAmelCase__ : List[Any] = 2 ** len(self.unet.up_blocks ) if sample_size < 3 * down_scale_factor: raise ValueError( f"""{audio_length_in_s} is too small. Make sure it's bigger or equal to""" f""" {3 * down_scale_factor / self.unet.config.sample_rate}.""" ) UpperCAmelCase__ : List[Any] = int(_A ) if sample_size % down_scale_factor != 0: UpperCAmelCase__ : int = ( (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1 ) * down_scale_factor logger.info( f"""{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled""" f""" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising""" ''' process.''' ) UpperCAmelCase__ : Dict = int(_A ) UpperCAmelCase__ : Optional[Any] = next(iter(self.unet.parameters() ) ).dtype UpperCAmelCase__ : int = (batch_size, self.unet.config.in_channels, sample_size) if isinstance(_A , _A ) and len(_A ) != batch_size: raise ValueError( f"""You have passed a list of generators of length {len(_A )}, but requested an effective batch""" f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" ) UpperCAmelCase__ : Optional[int] = randn_tensor(_A , generator=_A , device=self.device , dtype=_A ) # set step values self.scheduler.set_timesteps(_A , device=audio.device ) UpperCAmelCase__ : List[str] = self.scheduler.timesteps.to(_A ) for t in self.progress_bar(self.scheduler.timesteps ): # 1. predict noise model_output UpperCAmelCase__ : Optional[int] = self.unet(_A , _A ).sample # 2. compute previous image: x_t -> t_t-1 UpperCAmelCase__ : List[Any] = self.scheduler.step(_A , _A , _A ).prev_sample UpperCAmelCase__ : Any = audio.clamp(-1 , 1 ).float().cpu().numpy() UpperCAmelCase__ : Any = audio[:, :, :original_sample_size] if not return_dict: return (audio,) return AudioPipelineOutput(audios=_A )
75
'''simple docstring''' import webbrowser from sys import argv from urllib.parse import parse_qs, quote import requests from bsa import BeautifulSoup from fake_useragent import UserAgent if __name__ == "__main__": UpperCamelCase__ = '''%20'''.join(argv[1:]) if len(argv) > 1 else quote(str(input('''Search: '''))) print('''Googling.....''') UpperCamelCase__ = F"""https://www.google.com/search?q={query}&num=100""" UpperCamelCase__ = requests.get( url, headers={'''User-Agent''': str(UserAgent().random)}, ) try: UpperCamelCase__ = ( BeautifulSoup(res.text, '''html.parser''') .find('''div''', attrs={'''class''': '''yuRUbf'''}) .find('''a''') .get('''href''') ) except AttributeError: UpperCamelCase__ = parse_qs( BeautifulSoup(res.text, '''html.parser''') .find('''div''', attrs={'''class''': '''kCrYT'''}) .find('''a''') .get('''href''') )['''url'''][0] webbrowser.open(link)
75
1
"""simple docstring""" import unittest from transformers import BigBirdTokenizer, BigBirdTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin _lowerCAmelCase = '▁' _lowerCAmelCase = get_tests_dir('fixtures/test_sentencepiece.model') @require_sentencepiece @require_tokenizers class UpperCamelCase (__snake_case , unittest.TestCase ): _SCREAMING_SNAKE_CASE : int = BigBirdTokenizer _SCREAMING_SNAKE_CASE : Any = BigBirdTokenizerFast _SCREAMING_SNAKE_CASE : List[str] = True _SCREAMING_SNAKE_CASE : str = True def __snake_case ( self :int ) ->int: super().setUp() lowercase : Tuple = self.tokenizer_class(__magic_name__ , keep_accents=__magic_name__ ) tokenizer.save_pretrained(self.tmpdirname ) def __snake_case ( self :str ) ->int: lowercase : Optional[Any] = """<s>""" lowercase : Dict = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__magic_name__ ) , __magic_name__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__magic_name__ ) , __magic_name__ ) def __snake_case ( self :str ) ->Dict: lowercase : List[Any] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """<unk>""" ) self.assertEqual(vocab_keys[1] , """<s>""" ) self.assertEqual(vocab_keys[-1] , """[MASK]""" ) self.assertEqual(len(__magic_name__ ) , 1_004 ) def __snake_case ( self :List[str] ) ->Union[str, Any]: self.assertEqual(self.get_tokenizer().vocab_size , 1_000 ) def __snake_case ( self :List[str] ) ->Dict: if not self.test_rust_tokenizer: return lowercase : Union[str, Any] = self.get_tokenizer() lowercase : Any = self.get_rust_tokenizer() lowercase : Optional[Any] = """I was born in 92000, and this is falsé.""" lowercase : List[str] = tokenizer.tokenize(__magic_name__ ) lowercase : List[Any] = rust_tokenizer.tokenize(__magic_name__ ) self.assertListEqual(__magic_name__ , __magic_name__ ) lowercase : Dict = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) lowercase : List[str] = rust_tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) self.assertListEqual(__magic_name__ , __magic_name__ ) lowercase : Tuple = self.get_rust_tokenizer() lowercase : Union[str, Any] = tokenizer.encode(__magic_name__ ) lowercase : Any = rust_tokenizer.encode(__magic_name__ ) self.assertListEqual(__magic_name__ , __magic_name__ ) def __snake_case ( self :List[str] ) ->Optional[Any]: lowercase : List[str] = BigBirdTokenizer(__magic_name__ , keep_accents=__magic_name__ ) lowercase : Optional[Any] = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(__magic_name__ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(__magic_name__ ) , [285, 46, 10, 170, 382] , ) lowercase : int = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( __magic_name__ , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """.""", ] , ) lowercase : str = tokenizer.convert_tokens_to_ids(__magic_name__ ) self.assertListEqual( __magic_name__ , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , ) lowercase : Optional[int] = tokenizer.convert_ids_to_tokens(__magic_name__ ) self.assertListEqual( __magic_name__ , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """.""", ] , ) @cached_property def __snake_case ( self :Dict ) ->Dict: return BigBirdTokenizer.from_pretrained("""google/bigbird-roberta-base""" ) @slow def __snake_case ( self :Tuple ) ->int: lowercase : Optional[int] = """Hello World!""" lowercase : Tuple = [65, 18_536, 2_260, 101, 66] self.assertListEqual(__magic_name__ , self.big_tokenizer.encode(__magic_name__ ) ) @slow def __snake_case ( self :int ) ->List[Any]: lowercase : Union[str, Any] = ( """This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will""" """ add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth""" ) # fmt: off lowercase : List[Any] = [65, 871, 419, 358, 946, 991, 2_521, 452, 358, 1_357, 387, 7_751, 3_536, 112, 985, 456, 126, 865, 938, 5_400, 5_734, 458, 1_368, 467, 786, 2_462, 5_246, 1_159, 633, 865, 4_519, 457, 582, 852, 2_557, 427, 916, 508, 405, 34_324, 497, 391, 408, 11_342, 1_244, 385, 100, 938, 985, 456, 574, 362, 12_597, 3_200, 3_129, 1_172, 66] # noqa: E231 # fmt: on self.assertListEqual(__magic_name__ , self.big_tokenizer.encode(__magic_name__ ) ) @require_torch @slow def __snake_case ( self :Dict ) ->List[Any]: import torch from transformers import BigBirdConfig, BigBirdModel # Build sequence lowercase : Optional[Any] = list(self.big_tokenizer.get_vocab().keys() )[:10] lowercase : Dict = """ """.join(__magic_name__ ) lowercase : Optional[Any] = self.big_tokenizer.encode_plus(__magic_name__ , return_tensors="""pt""" , return_token_type_ids=__magic_name__ ) lowercase : List[str] = self.big_tokenizer.batch_encode_plus( [sequence + """ """ + sequence] , return_tensors="""pt""" , return_token_type_ids=__magic_name__ ) lowercase : Any = BigBirdConfig(attention_type="""original_full""" ) lowercase : Tuple = BigBirdModel(__magic_name__ ) assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size with torch.no_grad(): model(**__magic_name__ ) model(**__magic_name__ ) @slow def __snake_case ( self :Optional[int] ) ->Any: lowercase : List[Any] = BigBirdTokenizer.from_pretrained("""google/bigbird-roberta-base""" ) lowercase : Any = tokenizer.decode(tokenizer("""Paris is the [MASK].""" ).input_ids ) self.assertTrue(decoded_text == """[CLS] Paris is the[MASK].[SEP]""" ) @slow def __snake_case ( self :Optional[Any] ) ->str: # fmt: off lowercase : Any = {"""input_ids""": [[65, 39_286, 458, 36_335, 2_001, 456, 13_073, 13_266, 455, 113, 7_746, 1_741, 11_157, 391, 13_073, 13_266, 455, 113, 3_967, 35_412, 113, 4_936, 109, 3_870, 2_377, 113, 30_084, 45_720, 458, 134, 17_496, 112, 503, 11_672, 113, 118, 112, 5_665, 13_347, 38_687, 112, 1_496, 31_389, 112, 3_268, 47_264, 134, 962, 112, 16_377, 8_035, 23_130, 430, 12_169, 15_518, 28_592, 458, 146, 41_697, 109, 391, 12_169, 15_518, 16_689, 458, 146, 41_358, 109, 452, 726, 4_034, 111, 763, 35_412, 5_082, 388, 1_903, 111, 9_051, 391, 2_870, 48_918, 1_900, 1_123, 550, 998, 112, 9_586, 15_985, 455, 391, 410, 22_955, 37_636, 114, 66], [65, 448, 17_496, 419, 3_663, 385, 763, 113, 27_533, 2_870, 3_283, 13_043, 1_639, 24_713, 523, 656, 24_013, 18_550, 2_521, 517, 27_014, 21_244, 420, 1_212, 1_465, 391, 927, 4_833, 388, 578, 11_786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2_169, 7_687, 21_932, 18_146, 726, 363, 17_032, 3_391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=__magic_name__ , model_name="""google/bigbird-roberta-base""" , revision="""215c99f1600e06f83acce68422f2035b2b5c3510""" , )
348
"""simple docstring""" import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_mvp import MvpTokenizer _lowerCAmelCase = logging.get_logger(__name__) _lowerCAmelCase = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'} # See all MVP models at https://huggingface.co/models?filter=mvp _lowerCAmelCase = { 'vocab_file': { 'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json', }, 'added_tokens.json': { 'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json', }, 'merges_file': { 'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt', }, 'tokenizer_file': { 'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json', }, } _lowerCAmelCase = { 'RUCAIBox/mvp': 10_24, } class UpperCamelCase (__snake_case ): _SCREAMING_SNAKE_CASE : List[Any] = VOCAB_FILES_NAMES _SCREAMING_SNAKE_CASE : Optional[int] = PRETRAINED_VOCAB_FILES_MAP _SCREAMING_SNAKE_CASE : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _SCREAMING_SNAKE_CASE : Any = ["""input_ids""", """attention_mask"""] _SCREAMING_SNAKE_CASE : Optional[Any] = MvpTokenizer def __init__( self :int , __magic_name__ :Any=None , __magic_name__ :Any=None , __magic_name__ :Dict=None , __magic_name__ :Dict="replace" , __magic_name__ :Any="<s>" , __magic_name__ :Optional[Any]="</s>" , __magic_name__ :Dict="</s>" , __magic_name__ :Optional[Any]="<s>" , __magic_name__ :Any="<unk>" , __magic_name__ :Optional[Any]="<pad>" , __magic_name__ :int="<mask>" , __magic_name__ :int=False , __magic_name__ :str=True , **__magic_name__ :Tuple , ) ->str: super().__init__( __magic_name__ , __magic_name__ , tokenizer_file=__magic_name__ , errors=__magic_name__ , bos_token=__magic_name__ , eos_token=__magic_name__ , sep_token=__magic_name__ , cls_token=__magic_name__ , unk_token=__magic_name__ , pad_token=__magic_name__ , mask_token=__magic_name__ , add_prefix_space=__magic_name__ , trim_offsets=__magic_name__ , **__magic_name__ , ) lowercase : Any = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("""add_prefix_space""" , __magic_name__ ) != add_prefix_space: lowercase : List[str] = getattr(__magic_name__ , pre_tok_state.pop("""type""" ) ) lowercase : List[Any] = add_prefix_space lowercase : List[str] = pre_tok_class(**__magic_name__ ) lowercase : List[Any] = add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` lowercase : List[Any] = """post_processor""" lowercase : List[str] = getattr(self.backend_tokenizer , __magic_name__ , __magic_name__ ) if tokenizer_component_instance: lowercase : str = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: lowercase : Dict = tuple(state["""sep"""] ) if "cls" in state: lowercase : Union[str, Any] = tuple(state["""cls"""] ) lowercase : Dict = False if state.get("""add_prefix_space""" , __magic_name__ ) != add_prefix_space: lowercase : str = add_prefix_space lowercase : Tuple = True if state.get("""trim_offsets""" , __magic_name__ ) != trim_offsets: lowercase : Dict = trim_offsets lowercase : Any = True if changes_to_apply: lowercase : List[str] = getattr(__magic_name__ , state.pop("""type""" ) ) lowercase : Any = component_class(**__magic_name__ ) setattr(self.backend_tokenizer , __magic_name__ , __magic_name__ ) @property def __snake_case ( self :int ) ->str: if self._mask_token is None: if self.verbose: logger.error("""Using mask_token, but it is not set yet.""" ) return None return str(self._mask_token ) @mask_token.setter def __snake_case ( self :Any , __magic_name__ :List[Any] ) ->Dict: lowercase : Dict = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else value lowercase : List[Any] = value def __snake_case ( self :Optional[Any] , *__magic_name__ :Optional[int] , **__magic_name__ :Optional[int] ) ->BatchEncoding: lowercase : Tuple = kwargs.get("""is_split_into_words""" , __magic_name__ ) if is_split_into_words and not self.add_prefix_space: raise ValueError( f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ """to use it with pretokenized inputs.""" ) return super()._batch_encode_plus(*__magic_name__ , **__magic_name__ ) def __snake_case ( self :Optional[int] , *__magic_name__ :Optional[Any] , **__magic_name__ :Union[str, Any] ) ->BatchEncoding: lowercase : Tuple = kwargs.get("""is_split_into_words""" , __magic_name__ ) if is_split_into_words and not self.add_prefix_space: raise ValueError( f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ """to use it with pretokenized inputs.""" ) return super()._encode_plus(*__magic_name__ , **__magic_name__ ) def __snake_case ( self :List[Any] , __magic_name__ :str , __magic_name__ :Optional[str] = None ) ->Tuple[str]: lowercase : int = self._tokenizer.model.save(__magic_name__ , name=__magic_name__ ) return tuple(__magic_name__ ) def __snake_case ( self :Dict , __magic_name__ :Optional[int] , __magic_name__ :List[Any]=None ) ->int: lowercase : Tuple = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def __snake_case ( self :Dict , __magic_name__ :List[int] , __magic_name__ :Optional[List[int]] = None ) ->List[int]: lowercase : Any = [self.sep_token_id] lowercase : Tuple = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
348
1
from tempfile import TemporaryDirectory from unittest import TestCase from unittest.mock import MagicMock, patch from transformers import AutoModel, TFAutoModel from transformers.onnx import FeaturesManager from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch @require_torch @require_tf class _a ( UpperCamelCase__ ): def lowerCamelCase_ ( self: Dict ) -> List[str]: """simple docstring""" lowercase__ = SMALL_MODEL_IDENTIFIER lowercase__ = '''pt''' lowercase__ = '''tf''' def lowerCamelCase_ ( self: List[str] , UpperCamelCase_: Any ) -> int: """simple docstring""" lowercase__ = AutoModel.from_pretrained(self.test_model ) model_pt.save_pretrained(UpperCamelCase_ ) def lowerCamelCase_ ( self: Optional[Any] , UpperCamelCase_: Tuple ) -> List[Any]: """simple docstring""" lowercase__ = TFAutoModel.from_pretrained(self.test_model , from_pt=UpperCamelCase_ ) model_tf.save_pretrained(UpperCamelCase_ ) def lowerCamelCase_ ( self: str ) -> Any: """simple docstring""" lowercase__ = '''mock_framework''' # Framework provided - return whatever the user provides lowercase__ = FeaturesManager.determine_framework(self.test_model , UpperCamelCase_ ) self.assertEqual(UpperCamelCase_ , UpperCamelCase_ ) # Local checkpoint and framework provided - return provided framework # PyTorch checkpoint with TemporaryDirectory() as local_pt_ckpt: self._setup_pt_ckpt(UpperCamelCase_ ) lowercase__ = FeaturesManager.determine_framework(UpperCamelCase_ , UpperCamelCase_ ) self.assertEqual(UpperCamelCase_ , UpperCamelCase_ ) # TensorFlow checkpoint with TemporaryDirectory() as local_tf_ckpt: self._setup_tf_ckpt(UpperCamelCase_ ) lowercase__ = FeaturesManager.determine_framework(UpperCamelCase_ , UpperCamelCase_ ) self.assertEqual(UpperCamelCase_ , UpperCamelCase_ ) def lowerCamelCase_ ( self: Any ) -> Any: """simple docstring""" with TemporaryDirectory() as local_pt_ckpt: self._setup_pt_ckpt(UpperCamelCase_ ) lowercase__ = FeaturesManager.determine_framework(UpperCamelCase_ ) self.assertEqual(UpperCamelCase_ , self.framework_pt ) # TensorFlow checkpoint with TemporaryDirectory() as local_tf_ckpt: self._setup_tf_ckpt(UpperCamelCase_ ) lowercase__ = FeaturesManager.determine_framework(UpperCamelCase_ ) self.assertEqual(UpperCamelCase_ , self.framework_tf ) # Invalid local checkpoint with TemporaryDirectory() as local_invalid_ckpt: with self.assertRaises(UpperCamelCase_ ): lowercase__ = FeaturesManager.determine_framework(UpperCamelCase_ ) def lowerCamelCase_ ( self: int ) -> Any: """simple docstring""" lowercase__ = MagicMock(return_value=UpperCamelCase_ ) with patch('''transformers.onnx.features.is_tf_available''' , UpperCamelCase_ ): lowercase__ = FeaturesManager.determine_framework(self.test_model ) self.assertEqual(UpperCamelCase_ , self.framework_pt ) # PyTorch not in environment -> use TensorFlow lowercase__ = MagicMock(return_value=UpperCamelCase_ ) with patch('''transformers.onnx.features.is_torch_available''' , UpperCamelCase_ ): lowercase__ = FeaturesManager.determine_framework(self.test_model ) self.assertEqual(UpperCamelCase_ , self.framework_tf ) # Both in environment -> use PyTorch lowercase__ = MagicMock(return_value=UpperCamelCase_ ) lowercase__ = MagicMock(return_value=UpperCamelCase_ ) with patch('''transformers.onnx.features.is_tf_available''' , UpperCamelCase_ ), patch( '''transformers.onnx.features.is_torch_available''' , UpperCamelCase_ ): lowercase__ = FeaturesManager.determine_framework(self.test_model ) self.assertEqual(UpperCamelCase_ , self.framework_pt ) # Both not in environment -> raise error lowercase__ = MagicMock(return_value=UpperCamelCase_ ) lowercase__ = MagicMock(return_value=UpperCamelCase_ ) with patch('''transformers.onnx.features.is_tf_available''' , UpperCamelCase_ ), patch( '''transformers.onnx.features.is_torch_available''' , UpperCamelCase_ ): with self.assertRaises(UpperCamelCase_ ): lowercase__ = FeaturesManager.determine_framework(self.test_model )
43
import gc import unittest from parameterized import parameterized from diffusers import FlaxUNetaDConditionModel from diffusers.utils import is_flax_available from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow if is_flax_available(): import jax import jax.numpy as jnp @slow @require_flax class a ( unittest.TestCase ): def UpperCAmelCase__ ( self : List[Any] , snake_case__ : List[str] , snake_case__ : Optional[int] ): """simple docstring""" return F"gaussian_noise_s={seed}_shape={'_'.join([str(snake_case__ ) for s in shape] )}.npy" def UpperCAmelCase__ ( self : Any ): """simple docstring""" super().tearDown() gc.collect() def UpperCAmelCase__ ( self : int , snake_case__ : List[str]=0 , snake_case__ : int=(4, 4, 64, 64) , snake_case__ : Union[str, Any]=False ): """simple docstring""" __lowerCAmelCase = jnp.bfloataa if fpaa else jnp.floataa __lowerCAmelCase = jnp.array(load_hf_numpy(self.get_file_format(snake_case__ , snake_case__ ) ) , dtype=snake_case__ ) return image def UpperCAmelCase__ ( self : str , snake_case__ : Any=False , snake_case__ : List[Any]="CompVis/stable-diffusion-v1-4" ): """simple docstring""" __lowerCAmelCase = jnp.bfloataa if fpaa else jnp.floataa __lowerCAmelCase = "bf16" if fpaa else None __lowerCAmelCase , __lowerCAmelCase = FlaxUNetaDConditionModel.from_pretrained( snake_case__ , subfolder="unet" , dtype=snake_case__ , revision=snake_case__ ) return model, params def UpperCAmelCase__ ( self : Any , snake_case__ : Tuple=0 , snake_case__ : Dict=(4, 77, 768) , snake_case__ : List[str]=False ): """simple docstring""" __lowerCAmelCase = jnp.bfloataa if fpaa else jnp.floataa __lowerCAmelCase = jnp.array(load_hf_numpy(self.get_file_format(snake_case__ , snake_case__ ) ) , dtype=snake_case__ ) return hidden_states @parameterized.expand( [ # fmt: off [83, 4, [-0.2_3_2_3, -0.1_3_0_4, 0.0_8_1_3, -0.3_0_9_3, -0.0_9_1_9, -0.1_5_7_1, -0.1_1_2_5, -0.5_8_0_6]], [17, 0.5_5, [-0.0_8_3_1, -0.2_4_4_3, 0.0_9_0_1, -0.0_9_1_9, 0.3_3_9_6, 0.0_1_0_3, -0.3_7_4_3, 0.0_7_0_1]], [8, 0.8_9, [-0.4_8_6_3, 0.0_8_5_9, 0.0_8_7_5, -0.1_6_5_8, 0.9_1_9_9, -0.0_1_1_4, 0.4_8_3_9, 0.4_6_3_9]], [3, 1_000, [-0.5_6_4_9, 0.2_4_0_2, -0.5_5_1_8, 0.1_2_4_8, 1.1_3_2_8, -0.2_4_4_3, -0.0_3_2_5, -1.0_0_7_8]], # fmt: on ] ) def UpperCAmelCase__ ( self : Dict , snake_case__ : Any , snake_case__ : List[Any] , snake_case__ : Optional[Any] ): """simple docstring""" __lowerCAmelCase , __lowerCAmelCase = self.get_unet_model(model_id="CompVis/stable-diffusion-v1-4" , fpaa=snake_case__ ) __lowerCAmelCase = self.get_latents(snake_case__ , fpaa=snake_case__ ) __lowerCAmelCase = self.get_encoder_hidden_states(snake_case__ , fpaa=snake_case__ ) __lowerCAmelCase = model.apply( {"params": params} , snake_case__ , jnp.array(snake_case__ , dtype=jnp.intaa ) , encoder_hidden_states=snake_case__ , ).sample assert sample.shape == latents.shape __lowerCAmelCase = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa ) __lowerCAmelCase = jnp.array(snake_case__ , dtype=jnp.floataa ) # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware assert jnp.allclose(snake_case__ , snake_case__ , atol=1E-2 ) @parameterized.expand( [ # fmt: off [83, 4, [0.1_5_1_4, 0.0_8_0_7, 0.1_6_2_4, 0.1_0_1_6, -0.1_8_9_6, 0.0_2_6_3, 0.0_6_7_7, 0.2_3_1_0]], [17, 0.5_5, [0.1_1_6_4, -0.0_2_1_6, 0.0_1_7_0, 0.1_5_8_9, -0.3_1_2_0, 0.1_0_0_5, -0.0_5_8_1, -0.1_4_5_8]], [8, 0.8_9, [-0.1_7_5_8, -0.0_1_6_9, 0.1_0_0_4, -0.1_4_1_1, 0.1_3_1_2, 0.1_1_0_3, -0.1_9_9_6, 0.2_1_3_9]], [3, 1_000, [0.1_2_1_4, 0.0_3_5_2, -0.0_7_3_1, -0.1_5_6_2, -0.0_9_9_4, -0.0_9_0_6, -0.2_3_4_0, -0.0_5_3_9]], # fmt: on ] ) def UpperCAmelCase__ ( self : Any , snake_case__ : Union[str, Any] , snake_case__ : List[Any] , snake_case__ : Dict ): """simple docstring""" __lowerCAmelCase , __lowerCAmelCase = self.get_unet_model(model_id="stabilityai/stable-diffusion-2" , fpaa=snake_case__ ) __lowerCAmelCase = self.get_latents(snake_case__ , shape=(4, 4, 96, 96) , fpaa=snake_case__ ) __lowerCAmelCase = self.get_encoder_hidden_states(snake_case__ , shape=(4, 77, 1_024) , fpaa=snake_case__ ) __lowerCAmelCase = model.apply( {"params": params} , snake_case__ , jnp.array(snake_case__ , dtype=jnp.intaa ) , encoder_hidden_states=snake_case__ , ).sample assert sample.shape == latents.shape __lowerCAmelCase = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa ) __lowerCAmelCase = jnp.array(snake_case__ , dtype=jnp.floataa ) # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware assert jnp.allclose(snake_case__ , snake_case__ , atol=1E-2 )
611
0
import unittest from pathlib import Path from tempfile import NamedTemporaryFile, TemporaryDirectory from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline from transformers.convert_graph_to_onnx import ( convert, ensure_valid_input, generate_identified_filename, infer_shapes, quantize, ) from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow class UpperCamelCase__ : '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ,lowerCamelCase__ : Tuple ,lowerCamelCase__ : Dict ,lowerCamelCase__ : Tuple ) -> Dict: '''simple docstring''' return None class UpperCamelCase__ : '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self : List[str] ,lowerCamelCase__ : Union[str, Any] ,lowerCamelCase__ : str ,lowerCamelCase__ : Any ,lowerCamelCase__ : Dict ) -> List[Any]: '''simple docstring''' return None class UpperCamelCase__ ( unittest.TestCase ): '''simple docstring''' __snake_case : Optional[Any] = [ # (model_name, model_kwargs) ("bert-base-cased", {}), ("gpt2", {"use_cache": False}), # We don't support exporting GPT2 past keys anymore ] @require_tf @slow def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Any: '''simple docstring''' for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: self._test_export(lowerCamelCase__ ,"""tf""" ,12 ,**lowerCamelCase__ ) @require_torch @slow def SCREAMING_SNAKE_CASE__ ( self : int ) -> Tuple: '''simple docstring''' for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: self._test_export(lowerCamelCase__ ,"""pt""" ,12 ,**lowerCamelCase__ ) @require_torch @slow def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Dict: '''simple docstring''' from transformers import BertModel SCREAMING_SNAKE_CASE = ["""[UNK]""", """[SEP]""", """[CLS]""", """[PAD]""", """[MASK]""", """some""", """other""", """words"""] with NamedTemporaryFile(mode="""w+t""" ) as vocab_file: vocab_file.write("""\n""".join(lowerCamelCase__ ) ) vocab_file.flush() SCREAMING_SNAKE_CASE = BertTokenizerFast(vocab_file.name ) with TemporaryDirectory() as bert_save_dir: SCREAMING_SNAKE_CASE = BertModel(BertConfig(vocab_size=len(lowerCamelCase__ ) ) ) model.save_pretrained(lowerCamelCase__ ) self._test_export(lowerCamelCase__ ,"""pt""" ,12 ,lowerCamelCase__ ) @require_tf @slow def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Any: '''simple docstring''' for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: SCREAMING_SNAKE_CASE = self._test_export(lowerCamelCase__ ,"""tf""" ,12 ,**lowerCamelCase__ ) SCREAMING_SNAKE_CASE = quantize(Path(lowerCamelCase__ ) ) # Ensure the actual quantized model is not bigger than the original one if quantized_path.stat().st_size >= Path(lowerCamelCase__ ).stat().st_size: self.fail("""Quantized model is bigger than initial ONNX model""" ) @require_torch @slow def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Tuple: '''simple docstring''' for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: SCREAMING_SNAKE_CASE = self._test_export(lowerCamelCase__ ,"""pt""" ,12 ,**lowerCamelCase__ ) SCREAMING_SNAKE_CASE = quantize(lowerCamelCase__ ) # Ensure the actual quantized model is not bigger than the original one if quantized_path.stat().st_size >= Path(lowerCamelCase__ ).stat().st_size: self.fail("""Quantized model is bigger than initial ONNX model""" ) def SCREAMING_SNAKE_CASE__ ( self : Dict ,lowerCamelCase__ : str ,lowerCamelCase__ : Dict ,lowerCamelCase__ : Any ,lowerCamelCase__ : Optional[Any]=None ,**lowerCamelCase__ : Optional[Any] ) -> List[Any]: '''simple docstring''' try: # Compute path with TemporaryDirectory() as tempdir: SCREAMING_SNAKE_CASE = Path(lowerCamelCase__ ).joinpath("""model.onnx""" ) # Remove folder if exists if path.parent.exists(): path.parent.rmdir() # Export convert(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ) return path except Exception as e: self.fail(lowerCamelCase__ ) @require_torch @require_tokenizers @slow def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Dict: '''simple docstring''' from transformers import BertModel SCREAMING_SNAKE_CASE = BertModel(BertConfig.from_pretrained("""lysandre/tiny-bert-random""" ) ) SCREAMING_SNAKE_CASE = BertTokenizerFast.from_pretrained("""lysandre/tiny-bert-random""" ) self._test_infer_dynamic_axis(lowerCamelCase__ ,lowerCamelCase__ ,"""pt""" ) @require_tf @require_tokenizers @slow def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> str: '''simple docstring''' from transformers import TFBertModel SCREAMING_SNAKE_CASE = TFBertModel(BertConfig.from_pretrained("""lysandre/tiny-bert-random""" ) ) SCREAMING_SNAKE_CASE = BertTokenizerFast.from_pretrained("""lysandre/tiny-bert-random""" ) self._test_infer_dynamic_axis(lowerCamelCase__ ,lowerCamelCase__ ,"""tf""" ) def SCREAMING_SNAKE_CASE__ ( self : Dict ,lowerCamelCase__ : Dict ,lowerCamelCase__ : List[str] ,lowerCamelCase__ : Union[str, Any] ) -> List[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = FeatureExtractionPipeline(lowerCamelCase__ ,lowerCamelCase__ ) SCREAMING_SNAKE_CASE = ["""input_ids""", """token_type_ids""", """attention_mask""", """output_0""", """output_1"""] SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = infer_shapes(lowerCamelCase__ ,lowerCamelCase__ ) # Assert all variables are present self.assertEqual(len(lowerCamelCase__ ) ,len(lowerCamelCase__ ) ) self.assertTrue(all(var_name in shapes for var_name in variable_names ) ) self.assertSequenceEqual(variable_names[:3] ,lowerCamelCase__ ) self.assertSequenceEqual(variable_names[3:] ,lowerCamelCase__ ) # Assert inputs are {0: batch, 1: sequence} for var_name in ["input_ids", "token_type_ids", "attention_mask"]: self.assertDictEqual(shapes[var_name] ,{0: """batch""", 1: """sequence"""} ) # Assert outputs are {0: batch, 1: sequence} and {0: batch} self.assertDictEqual(shapes["""output_0"""] ,{0: """batch""", 1: """sequence"""} ) self.assertDictEqual(shapes["""output_1"""] ,{0: """batch"""} ) def SCREAMING_SNAKE_CASE__ ( self : str ) -> str: '''simple docstring''' SCREAMING_SNAKE_CASE = ["""input_ids""", """attention_mask""", """token_type_ids"""] SCREAMING_SNAKE_CASE = {"""input_ids""": [1, 2, 3, 4], """attention_mask""": [0, 0, 0, 0], """token_type_ids""": [1, 1, 1, 1]} SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = ensure_valid_input(FuncContiguousArgs() ,lowerCamelCase__ ,lowerCamelCase__ ) # Should have exactly the same number of args (all are valid) self.assertEqual(len(lowerCamelCase__ ) ,3 ) # Should have exactly the same input names self.assertEqual(set(lowerCamelCase__ ) ,set(lowerCamelCase__ ) ) # Parameter should be reordered according to their respective place in the function: # (input_ids, token_type_ids, attention_mask) self.assertEqual(lowerCamelCase__ ,(tokens["""input_ids"""], tokens["""token_type_ids"""], tokens["""attention_mask"""]) ) # Generated args are interleaved with another args (for instance parameter "past" in GPT2) SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = ensure_valid_input(FuncNonContiguousArgs() ,lowerCamelCase__ ,lowerCamelCase__ ) # Should have exactly the one arg (all before the one not provided "some_other_args") self.assertEqual(len(lowerCamelCase__ ) ,1 ) self.assertEqual(len(lowerCamelCase__ ) ,1 ) # Should have only "input_ids" self.assertEqual(inputs_args[0] ,tokens["""input_ids"""] ) self.assertEqual(ordered_input_names[0] ,"""input_ids""" ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Union[str, Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = generate_identified_filename(Path("""/home/something/my_fake_model.onnx""" ) ,"""-test""" ) self.assertEqual("""/home/something/my_fake_model-test.onnx""" ,generated.as_posix() )
116
import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE_ = { """ut/deta""": """https://huggingface.co/ut/deta/resolve/main/config.json""", } class UpperCamelCase__ ( lowerCAmelCase_ ): '''simple docstring''' __snake_case : str = "deta" __snake_case : int = { "hidden_size": "d_model", "num_attention_heads": "encoder_attention_heads", } def __init__( self : List[Any] ,lowerCamelCase__ : Any=None ,lowerCamelCase__ : Optional[int]=900 ,lowerCamelCase__ : Optional[Any]=2048 ,lowerCamelCase__ : Optional[Any]=6 ,lowerCamelCase__ : Dict=2048 ,lowerCamelCase__ : Optional[int]=8 ,lowerCamelCase__ : List[str]=6 ,lowerCamelCase__ : Union[str, Any]=1024 ,lowerCamelCase__ : str=8 ,lowerCamelCase__ : Union[str, Any]=0.0 ,lowerCamelCase__ : Dict=True ,lowerCamelCase__ : Dict="relu" ,lowerCamelCase__ : int=256 ,lowerCamelCase__ : Optional[Any]=0.1 ,lowerCamelCase__ : Dict=0.0 ,lowerCamelCase__ : Dict=0.0 ,lowerCamelCase__ : Dict=0.02 ,lowerCamelCase__ : List[Any]=1.0 ,lowerCamelCase__ : List[str]=True ,lowerCamelCase__ : int=False ,lowerCamelCase__ : int="sine" ,lowerCamelCase__ : Any=5 ,lowerCamelCase__ : Optional[int]=4 ,lowerCamelCase__ : Optional[int]=4 ,lowerCamelCase__ : List[Any]=True ,lowerCamelCase__ : Optional[int]=300 ,lowerCamelCase__ : Optional[Any]=True ,lowerCamelCase__ : Optional[Any]=True ,lowerCamelCase__ : Optional[int]=1 ,lowerCamelCase__ : Tuple=5 ,lowerCamelCase__ : Tuple=2 ,lowerCamelCase__ : Dict=1 ,lowerCamelCase__ : str=1 ,lowerCamelCase__ : List[Any]=5 ,lowerCamelCase__ : Tuple=2 ,lowerCamelCase__ : Union[str, Any]=0.1 ,lowerCamelCase__ : List[str]=0.25 ,**lowerCamelCase__ : int ,) -> List[Any]: '''simple docstring''' if backbone_config is None: logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" ) SCREAMING_SNAKE_CASE = CONFIG_MAPPING["""resnet"""](out_features=["""stage2""", """stage3""", """stage4"""] ) else: if isinstance(lowerCamelCase__ ,lowerCamelCase__ ): SCREAMING_SNAKE_CASE = backbone_config.pop("""model_type""" ) SCREAMING_SNAKE_CASE = CONFIG_MAPPING[backbone_model_type] SCREAMING_SNAKE_CASE = config_class.from_dict(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = backbone_config SCREAMING_SNAKE_CASE = num_queries SCREAMING_SNAKE_CASE = max_position_embeddings SCREAMING_SNAKE_CASE = d_model SCREAMING_SNAKE_CASE = encoder_ffn_dim SCREAMING_SNAKE_CASE = encoder_layers SCREAMING_SNAKE_CASE = encoder_attention_heads SCREAMING_SNAKE_CASE = decoder_ffn_dim SCREAMING_SNAKE_CASE = decoder_layers SCREAMING_SNAKE_CASE = decoder_attention_heads SCREAMING_SNAKE_CASE = dropout SCREAMING_SNAKE_CASE = attention_dropout SCREAMING_SNAKE_CASE = activation_dropout SCREAMING_SNAKE_CASE = activation_function SCREAMING_SNAKE_CASE = init_std SCREAMING_SNAKE_CASE = init_xavier_std SCREAMING_SNAKE_CASE = encoder_layerdrop SCREAMING_SNAKE_CASE = auxiliary_loss SCREAMING_SNAKE_CASE = position_embedding_type # deformable attributes SCREAMING_SNAKE_CASE = num_feature_levels SCREAMING_SNAKE_CASE = encoder_n_points SCREAMING_SNAKE_CASE = decoder_n_points SCREAMING_SNAKE_CASE = two_stage SCREAMING_SNAKE_CASE = two_stage_num_proposals SCREAMING_SNAKE_CASE = with_box_refine SCREAMING_SNAKE_CASE = assign_first_stage if two_stage is True and with_box_refine is False: raise ValueError("""If two_stage is True, with_box_refine must be True.""" ) # Hungarian matcher SCREAMING_SNAKE_CASE = class_cost SCREAMING_SNAKE_CASE = bbox_cost SCREAMING_SNAKE_CASE = giou_cost # Loss coefficients SCREAMING_SNAKE_CASE = mask_loss_coefficient SCREAMING_SNAKE_CASE = dice_loss_coefficient SCREAMING_SNAKE_CASE = bbox_loss_coefficient SCREAMING_SNAKE_CASE = giou_loss_coefficient SCREAMING_SNAKE_CASE = eos_coefficient SCREAMING_SNAKE_CASE = focal_alpha super().__init__(is_encoder_decoder=lowerCamelCase__ ,**lowerCamelCase__ ) @property def SCREAMING_SNAKE_CASE__ ( self : str ) -> int: '''simple docstring''' return self.encoder_attention_heads @property def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> int: '''simple docstring''' return self.d_model def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Optional[int]: '''simple docstring''' SCREAMING_SNAKE_CASE = copy.deepcopy(self.__dict__ ) SCREAMING_SNAKE_CASE = self.backbone_config.to_dict() SCREAMING_SNAKE_CASE = self.__class__.model_type return output
116
1
"""simple docstring""" import re from typing import Callable, List, Optional, Union import tensorflow as tf try: from tensorflow.keras.optimizers.legacy import Adam except ImportError: from tensorflow.keras.optimizers import Adam class __A ( tf.keras.optimizers.schedules.LearningRateSchedule ): def __init__( self , a__ , a__ , a__ , a__ = 1.0 , a__ = None , ): super().__init__() _lowerCAmelCase : int = initial_learning_rate _lowerCAmelCase : str = warmup_steps _lowerCAmelCase : Optional[int] = power _lowerCAmelCase : List[str] = decay_schedule_fn _lowerCAmelCase : Dict = name def __call__( self , a__ ): with tf.name_scope(self.name or """WarmUp""" ) as name: # Implements polynomial warmup. i.e., if global_step < warmup_steps, the # learning rate will be `global_step/num_warmup_steps * init_lr`. _lowerCAmelCase : Optional[int] = tf.cast(a__ , tf.floataa ) _lowerCAmelCase : Optional[Any] = tf.cast(self.warmup_steps , tf.floataa ) _lowerCAmelCase : Union[str, Any] = global_step_float / warmup_steps_float _lowerCAmelCase : Dict = self.initial_learning_rate * tf.math.pow(a__ , self.power ) return tf.cond( global_step_float < warmup_steps_float , lambda: warmup_learning_rate , lambda: self.decay_schedule_fn(step - self.warmup_steps ) , name=a__ , ) def __A ( self ): return { "initial_learning_rate": self.initial_learning_rate, "decay_schedule_fn": self.decay_schedule_fn, "warmup_steps": self.warmup_steps, "power": self.power, "name": self.name, } def SCREAMING_SNAKE_CASE ( _lowerCamelCase : float ,_lowerCamelCase : int ,_lowerCamelCase : int ,_lowerCamelCase : float = 0.0 ,_lowerCamelCase : float = 0.9 ,_lowerCamelCase : float = 0.9_99 ,_lowerCamelCase : float = 1e-8 ,_lowerCamelCase : Optional[float] = None ,_lowerCamelCase : Optional[float] = None ,_lowerCamelCase : float = 0.0 ,_lowerCamelCase : float = 1.0 ,_lowerCamelCase : Optional[List[str]] = None ,) -> Optional[Any]: _lowerCAmelCase : Dict = tf.keras.optimizers.schedules.PolynomialDecay( initial_learning_rate=_lowerCamelCase ,decay_steps=num_train_steps - num_warmup_steps ,end_learning_rate=init_lr * min_lr_ratio ,power=_lowerCamelCase ,) if num_warmup_steps: _lowerCAmelCase : Optional[Any] = WarmUp( initial_learning_rate=_lowerCamelCase ,decay_schedule_fn=_lowerCamelCase ,warmup_steps=_lowerCamelCase ,) if weight_decay_rate > 0.0: _lowerCAmelCase : Any = AdamWeightDecay( learning_rate=_lowerCamelCase ,weight_decay_rate=_lowerCamelCase ,beta_a=_lowerCamelCase ,beta_a=_lowerCamelCase ,epsilon=_lowerCamelCase ,clipnorm=_lowerCamelCase ,global_clipnorm=_lowerCamelCase ,exclude_from_weight_decay=["""LayerNorm""", """layer_norm""", """bias"""] ,include_in_weight_decay=_lowerCamelCase ,) else: _lowerCAmelCase : List[Any] = tf.keras.optimizers.Adam( learning_rate=_lowerCamelCase ,beta_a=_lowerCamelCase ,beta_a=_lowerCamelCase ,epsilon=_lowerCamelCase ,clipnorm=_lowerCamelCase ,global_clipnorm=_lowerCamelCase ,) # We return the optimizer and the LR scheduler in order to better track the # evolution of the LR independently of the optimizer. return optimizer, lr_schedule class __A ( SCREAMING_SNAKE_CASE_ ): def __init__( self , a__ = 0.0_0_1 , a__ = 0.9 , a__ = 0.9_9_9 , a__ = 1e-7 , a__ = False , a__ = 0.0 , a__ = None , a__ = None , a__ = "AdamWeightDecay" , **a__ , ): super().__init__(a__ , a__ , a__ , a__ , a__ , a__ , **a__ ) _lowerCAmelCase : Union[str, Any] = weight_decay_rate _lowerCAmelCase : Union[str, Any] = include_in_weight_decay _lowerCAmelCase : Dict = exclude_from_weight_decay @classmethod def __A ( cls , a__ ): _lowerCAmelCase : List[str] = {"""WarmUp""": WarmUp} return super(a__ , cls ).from_config(a__ , custom_objects=a__ ) def __A ( self , a__ , a__ , a__ ): super(a__ , self )._prepare_local(a__ , a__ , a__ ) _lowerCAmelCase : Any = tf.constant( self.weight_decay_rate , name="""adam_weight_decay_rate""" ) def __A ( self , a__ , a__ , a__ ): _lowerCAmelCase : int = self._do_use_weight_decay(var.name ) if do_decay: return var.assign_sub( learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["""weight_decay_rate"""] , use_locking=self._use_locking , ) return tf.no_op() def __A ( self , a__ , a__=None , **a__ ): _lowerCAmelCase , _lowerCAmelCase : Tuple = list(zip(*a__ ) ) return super(a__ , self ).apply_gradients(zip(a__ , a__ ) , name=a__ , **a__ ) def __A ( self , a__ , a__ , a__ ): if apply_state is None: return self._decayed_lr_t[var_dtype], {} _lowerCAmelCase : Dict = apply_state or {} _lowerCAmelCase : Optional[int] = apply_state.get((var_device, var_dtype) ) if coefficients is None: _lowerCAmelCase : Optional[int] = self._fallback_apply_state(a__ , a__ ) _lowerCAmelCase : Tuple = coefficients return coefficients["lr_t"], {"apply_state": apply_state} def __A ( self , a__ , a__ , a__=None ): _lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = self._get_lr(var.device , var.dtype.base_dtype , a__ ) _lowerCAmelCase : Optional[int] = self._decay_weights_op(a__ , a__ , a__ ) with tf.control_dependencies([decay] ): return super(a__ , self )._resource_apply_dense(a__ , a__ , **a__ ) def __A ( self , a__ , a__ , a__ , a__=None ): _lowerCAmelCase , _lowerCAmelCase : Optional[Any] = self._get_lr(var.device , var.dtype.base_dtype , a__ ) _lowerCAmelCase : Union[str, Any] = self._decay_weights_op(a__ , a__ , a__ ) with tf.control_dependencies([decay] ): return super(a__ , self )._resource_apply_sparse(a__ , a__ , a__ , **a__ ) def __A ( self ): _lowerCAmelCase : Union[str, Any] = super().get_config() config.update({"""weight_decay_rate""": self.weight_decay_rate} ) return config def __A ( self , a__ ): if self.weight_decay_rate == 0: return False if self._include_in_weight_decay: for r in self._include_in_weight_decay: if re.search(a__ , a__ ) is not None: return True if self._exclude_from_weight_decay: for r in self._exclude_from_weight_decay: if re.search(a__ , a__ ) is not None: return False return True class __A ( SCREAMING_SNAKE_CASE_ ): def __init__( self ): _lowerCAmelCase : List[Any] = [] _lowerCAmelCase : Optional[int] = None @property def __A ( self ): if self._accum_steps is None: _lowerCAmelCase : Dict = tf.Variable( tf.constant(0 , dtype=tf.intaa ) , trainable=a__ , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , ) return self._accum_steps.value() @property def __A ( self ): if not self._gradients: raise ValueError("""The accumulator should be called first to initialize the gradients""" ) return [gradient.value() if gradient is not None else gradient for gradient in self._gradients] def __call__( self , a__ ): if not self._gradients: _lowerCAmelCase : Optional[Any] = self.step # Create the step variable. self._gradients.extend( [ tf.Variable( tf.zeros_like(a__ ) , trainable=a__ , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , ) if gradient is not None else gradient for gradient in gradients ] ) if len(a__ ) != len(self._gradients ): raise ValueError(F"Expected {len(self._gradients )} gradients, but got {len(a__ )}" ) for accum_gradient, gradient in zip(self._gradients , a__ ): if accum_gradient is not None and gradient is not None: accum_gradient.assign_add(a__ ) self._accum_steps.assign_add(1 ) def __A ( self ): if not self._gradients: return self._accum_steps.assign(0 ) for gradient in self._gradients: if gradient is not None: gradient.assign(tf.zeros_like(a__ ) )
213
"""simple docstring""" import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class __A ( unittest.TestCase ): @property def __A ( self ): torch.manual_seed(0 ) _lowerCAmelCase : Dict = UNetaDModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , ) return model @property def __A ( self ): torch.manual_seed(0 ) _lowerCAmelCase : List[Any] = VQModel( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=3 , ) return model @property def __A ( self ): torch.manual_seed(0 ) _lowerCAmelCase : Tuple = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) return CLIPTextModel(a__ ) def __A ( self ): _lowerCAmelCase : Optional[Any] = self.dummy_uncond_unet _lowerCAmelCase : int = DDIMScheduler() _lowerCAmelCase : Any = self.dummy_vq_model _lowerCAmelCase : List[str] = LDMPipeline(unet=a__ , vqvae=a__ , scheduler=a__ ) ldm.to(a__ ) ldm.set_progress_bar_config(disable=a__ ) _lowerCAmelCase : List[Any] = torch.manual_seed(0 ) _lowerCAmelCase : Optional[Any] = ldm(generator=a__ , num_inference_steps=2 , output_type="""numpy""" ).images _lowerCAmelCase : Any = torch.manual_seed(0 ) _lowerCAmelCase : Tuple = ldm(generator=a__ , num_inference_steps=2 , output_type="""numpy""" , return_dict=a__ )[0] _lowerCAmelCase : List[str] = image[0, -3:, -3:, -1] _lowerCAmelCase : Any = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) _lowerCAmelCase : List[Any] = np.array([0.8_5_1_2, 0.8_1_8, 0.6_4_1_1, 0.6_8_0_8, 0.4_4_6_5, 0.5_6_1_8, 0.4_6, 0.6_2_3_1, 0.5_1_7_2] ) _lowerCAmelCase : List[str] = 1e-2 if torch_device != """mps""" else 3e-2 assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance @slow @require_torch class __A ( unittest.TestCase ): def __A ( self ): _lowerCAmelCase : Any = LDMPipeline.from_pretrained("""CompVis/ldm-celebahq-256""" ) ldm.to(a__ ) ldm.set_progress_bar_config(disable=a__ ) _lowerCAmelCase : int = torch.manual_seed(0 ) _lowerCAmelCase : List[str] = ldm(generator=a__ , num_inference_steps=5 , output_type="""numpy""" ).images _lowerCAmelCase : Dict = image[0, -3:, -3:, -1] assert image.shape == (1, 256, 256, 3) _lowerCAmelCase : Union[str, Any] = np.array([0.4_3_9_9, 0.4_4_9_7_5, 0.4_6_8_2_5, 0.4_7_4, 0.4_3_5_9, 0.4_5_8_1, 0.4_5_0_9_5, 0.4_3_4_1, 0.4_4_4_7] ) _lowerCAmelCase : str = 1e-2 if torch_device != """mps""" else 3e-2 assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
213
1
from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class __UpperCamelCase ( __UpperCAmelCase ): '''simple docstring''' __a : List[str] =["""image_processor""", """tokenizer"""] __a : Optional[Any] ="""BridgeTowerImageProcessor""" __a : Optional[Any] =("""RobertaTokenizer""", """RobertaTokenizerFast""") def __init__( self , UpperCAmelCase_ , UpperCAmelCase_ ): super().__init__(UpperCAmelCase_ , UpperCAmelCase_ ) def __call__( self , UpperCAmelCase_ , UpperCAmelCase_ = None , UpperCAmelCase_ = True , UpperCAmelCase_ = False , UpperCAmelCase_ = None , UpperCAmelCase_ = None , UpperCAmelCase_ = 0 , UpperCAmelCase_ = None , UpperCAmelCase_ = None , UpperCAmelCase_ = None , UpperCAmelCase_ = False , UpperCAmelCase_ = False , UpperCAmelCase_ = False , UpperCAmelCase_ = False , UpperCAmelCase_ = True , UpperCAmelCase_ = None , **UpperCAmelCase_ , ): lowerCAmelCase = self.tokenizer( text=UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ , max_length=UpperCAmelCase_ , stride=UpperCAmelCase_ , pad_to_multiple_of=UpperCAmelCase_ , return_token_type_ids=UpperCAmelCase_ , return_attention_mask=UpperCAmelCase_ , return_overflowing_tokens=UpperCAmelCase_ , return_special_tokens_mask=UpperCAmelCase_ , return_offsets_mapping=UpperCAmelCase_ , return_length=UpperCAmelCase_ , verbose=UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_ , ) # add pixel_values + pixel_mask lowerCAmelCase = self.image_processor( UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , do_normalize=UpperCAmelCase_ , do_center_crop=UpperCAmelCase_ , **UpperCAmelCase_ ) encoding.update(UpperCAmelCase_ ) return encoding def __snake_case ( self , *UpperCAmelCase_ , **UpperCAmelCase_ ): return self.tokenizer.batch_decode(*UpperCAmelCase_ , **UpperCAmelCase_ ) def __snake_case ( self , *UpperCAmelCase_ , **UpperCAmelCase_ ): return self.tokenizer.decode(*UpperCAmelCase_ , **UpperCAmelCase_ ) @property def __snake_case ( self ): lowerCAmelCase = self.tokenizer.model_input_names lowerCAmelCase = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
716
from typing import Optional import pyspark from .. import Features, NamedSplit from ..download import DownloadMode from ..packaged_modules.spark.spark import Spark from .abc import AbstractDatasetReader class __UpperCamelCase ( __UpperCAmelCase ): '''simple docstring''' def __init__( self , UpperCAmelCase_ , UpperCAmelCase_ = None , UpperCAmelCase_ = None , UpperCAmelCase_ = True , UpperCAmelCase_ = None , UpperCAmelCase_ = False , UpperCAmelCase_ = None , UpperCAmelCase_ = True , UpperCAmelCase_ = "arrow" , **UpperCAmelCase_ , ): super().__init__( split=UpperCAmelCase_ , features=UpperCAmelCase_ , cache_dir=UpperCAmelCase_ , keep_in_memory=UpperCAmelCase_ , streaming=UpperCAmelCase_ , **UpperCAmelCase_ , ) lowerCAmelCase = load_from_cache_file lowerCAmelCase = file_format lowerCAmelCase = Spark( df=UpperCAmelCase_ , features=UpperCAmelCase_ , cache_dir=UpperCAmelCase_ , working_dir=UpperCAmelCase_ , **UpperCAmelCase_ , ) def __snake_case ( self ): if self.streaming: return self.builder.as_streaming_dataset(split=self.split ) lowerCAmelCase = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD self.builder.download_and_prepare( download_mode=UpperCAmelCase_ , file_format=self._file_format , ) return self.builder.as_dataset(split=self.split )
33
0
'''simple docstring''' import argparse import logging import pickle import random import time import numpy as np from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO ) lowercase : str = logging.getLogger(__name__) def __a ( ) -> int: lowerCAmelCase = argparse.ArgumentParser( description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)." ) parser.add_argument("--file_path" , type=lowerCAmelCase__ , default="data/dump.txt" , help="The path to the data." ) parser.add_argument("--tokenizer_type" , type=lowerCAmelCase__ , default="bert" , choices=["bert", "roberta", "gpt2"] ) parser.add_argument("--tokenizer_name" , type=lowerCAmelCase__ , default="bert-base-uncased" , help="The tokenizer to use." ) parser.add_argument("--dump_file" , type=lowerCAmelCase__ , default="data/dump" , help="The dump file prefix." ) lowerCAmelCase = parser.parse_args() logger.info(f"Loading Tokenizer ({args.tokenizer_name})" ) if args.tokenizer_type == "bert": lowerCAmelCase = BertTokenizer.from_pretrained(args.tokenizer_name ) lowerCAmelCase = tokenizer.special_tokens_map["cls_token"] # `[CLS]` lowerCAmelCase = tokenizer.special_tokens_map["sep_token"] # `[SEP]` elif args.tokenizer_type == "roberta": lowerCAmelCase = RobertaTokenizer.from_pretrained(args.tokenizer_name ) lowerCAmelCase = tokenizer.special_tokens_map["cls_token"] # `<s>` lowerCAmelCase = tokenizer.special_tokens_map["sep_token"] # `</s>` elif args.tokenizer_type == "gpt2": lowerCAmelCase = GPTaTokenizer.from_pretrained(args.tokenizer_name ) lowerCAmelCase = tokenizer.special_tokens_map["bos_token"] # `<|endoftext|>` lowerCAmelCase = tokenizer.special_tokens_map["eos_token"] # `<|endoftext|>` logger.info(f"Loading text from {args.file_path}" ) with open(args.file_path , "r" , encoding="utf8" ) as fp: lowerCAmelCase = fp.readlines() logger.info("Start encoding" ) logger.info(f"{len(lowerCAmelCase__ )} examples to process." ) lowerCAmelCase = [] lowerCAmelCase = 0 lowerCAmelCase = 1_0000 lowerCAmelCase = time.time() for text in data: lowerCAmelCase = f"{bos} {text.strip()} {sep}" lowerCAmelCase = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) rslt.append(lowerCAmelCase__ ) iter += 1 if iter % interval == 0: lowerCAmelCase = time.time() logger.info(f"{iter} examples processed. - {(end-start):.2f}s/{interval}expl" ) lowerCAmelCase = time.time() logger.info("Finished binarization" ) logger.info(f"{len(lowerCAmelCase__ )} examples processed." ) lowerCAmelCase = f"{args.dump_file}.{args.tokenizer_name}.pickle" lowerCAmelCase = tokenizer.vocab_size if vocab_size < (1 << 16): lowerCAmelCase = [np.uintaa(lowerCAmelCase__ ) for d in rslt] else: lowerCAmelCase = [np.intaa(lowerCAmelCase__ ) for d in rslt] random.shuffle(rslt_ ) logger.info(f"Dump to {dp_file}" ) with open(lowerCAmelCase__ , "wb" ) as handle: pickle.dump(rslt_ , lowerCAmelCase__ , protocol=pickle.HIGHEST_PROTOCOL ) if __name__ == "__main__": main()
649
"""simple docstring""" def a__ ( lowerCAmelCase__ ): UpperCAmelCase_ = 0 UpperCAmelCase_ = len(lowerCAmelCase__ ) for i in range(n - 1 ): for j in range(i + 1 , lowerCAmelCase__ ): if arr[i] > arr[j]: num_inversions += 1 return num_inversions def a__ ( lowerCAmelCase__ ): if len(lowerCAmelCase__ ) <= 1: return arr, 0 UpperCAmelCase_ = len(lowerCAmelCase__ ) // 2 UpperCAmelCase_ = arr[0:mid] UpperCAmelCase_ = arr[mid:] UpperCAmelCase_ , UpperCAmelCase_ = count_inversions_recursive(lowerCAmelCase__ ) UpperCAmelCase_ , UpperCAmelCase_ = count_inversions_recursive(lowerCAmelCase__ ) UpperCAmelCase_ , UpperCAmelCase_ = _count_cross_inversions(lowerCAmelCase__ , lowerCAmelCase__ ) UpperCAmelCase_ = inversion_p + inversions_q + cross_inversions return c, num_inversions def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ): UpperCAmelCase_ = [] UpperCAmelCase_ = UpperCAmelCase_ = UpperCAmelCase_ = 0 while i < len(lowerCAmelCase__ ) and j < len(lowerCAmelCase__ ): if p[i] > q[j]: # if P[1] > Q[j], then P[k] > Q[k] for all i < k <= len(P) # These are all inversions. The claim emerges from the # property that P is sorted. num_inversion += len(lowerCAmelCase__ ) - i r.append(q[j] ) j += 1 else: r.append(p[i] ) i += 1 if i < len(lowerCAmelCase__ ): r.extend(p[i:] ) else: r.extend(q[j:] ) return r, num_inversion def a__ ( ): UpperCAmelCase_ = [10, 2, 1, 5, 5, 2, 11] # this arr has 8 inversions: # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2) UpperCAmelCase_ = count_inversions_bf(lowerCAmelCase__ ) UpperCAmelCase_ , UpperCAmelCase_ = count_inversions_recursive(lowerCAmelCase__ ) assert num_inversions_bf == num_inversions_recursive == 8 print("number of inversions = " , lowerCAmelCase__ ) # testing an array with zero inversion (a sorted arr_1) arr_a.sort() UpperCAmelCase_ = count_inversions_bf(lowerCAmelCase__ ) UpperCAmelCase_ , UpperCAmelCase_ = count_inversions_recursive(lowerCAmelCase__ ) assert num_inversions_bf == num_inversions_recursive == 0 print("number of inversions = " , lowerCAmelCase__ ) # an empty list should also have zero inversions UpperCAmelCase_ = [] UpperCAmelCase_ = count_inversions_bf(lowerCAmelCase__ ) UpperCAmelCase_ , UpperCAmelCase_ = count_inversions_recursive(lowerCAmelCase__ ) assert num_inversions_bf == num_inversions_recursive == 0 print("number of inversions = " , lowerCAmelCase__ ) if __name__ == "__main__": main()
82
0
'''simple docstring''' import json import os import unittest from transformers.models.blenderbot_small.tokenization_blenderbot_small import ( VOCAB_FILES_NAMES, BlenderbotSmallTokenizer, ) from ...test_tokenization_common import TokenizerTesterMixin class UpperCAmelCase ( _lowercase , unittest.TestCase ): UpperCAmelCase : str = BlenderbotSmallTokenizer UpperCAmelCase : Tuple = False def UpperCAmelCase__ (self : Optional[int] ) -> List[str]: super().setUp() lowercase = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"] lowercase = dict(zip(A__ , range(len(A__ ) ) ) ) lowercase = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""] lowercase = {"unk_token": "__unk__", "bos_token": "__start__", "eos_token": "__end__"} lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(A__ ) + "\n" ) with open(self.merges_file , "w" , encoding="utf-8" ) as fp: fp.write("\n".join(A__ ) ) def UpperCAmelCase__ (self : List[str] , **A__ : str ) -> List[Any]: kwargs.update(self.special_tokens_map ) return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **A__ ) def UpperCAmelCase__ (self : Tuple , A__ : List[Any] ) -> List[Any]: lowercase = "adapt act apte" lowercase = "adapt act apte" return input_text, output_text def UpperCAmelCase__ (self : List[Any] ) -> Optional[Any]: lowercase = BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) lowercase = "adapt act apte" lowercase = ["adapt", "act", "ap@@", "te"] lowercase = tokenizer.tokenize(A__ ) self.assertListEqual(A__ , A__ ) lowercase = [tokenizer.bos_token] + tokens + [tokenizer.eos_token] lowercase = [0, 1, 2, 3, 4, 5] self.assertListEqual(tokenizer.convert_tokens_to_ids(A__ ) , A__ ) def UpperCAmelCase__ (self : List[Any] ) -> Union[str, Any]: lowercase = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M" ) assert tok("sam" ).input_ids == [1_3_8_4] lowercase = "I am a small frog." lowercase = tok([src_text] , padding=A__ , truncation=A__ )["input_ids"] lowercase = tok.batch_decode(A__ , skip_special_tokens=A__ , clean_up_tokenization_spaces=A__ )[0] assert src_text != decoded # I wish it did! assert decoded == "i am a small frog ." def UpperCAmelCase__ (self : Optional[int] ) -> List[str]: lowercase = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M" ) lowercase = "I am a small frog ." lowercase = "." lowercase = tok(A__ )["input_ids"] lowercase = tok(A__ )["input_ids"] assert encoded[-1] == encoded_dot[0]
721
'''simple docstring''' from functools import lru_cache def UpperCAmelCase_ ( lowerCAmelCase_ ): """simple docstring""" lowercase = 2 lowercase = set() while i * i <= n: if n % i: i += 1 else: n //= i factors.add(lowerCAmelCase_ ) if n > 1: factors.add(lowerCAmelCase_ ) return factors @lru_cache def UpperCAmelCase_ ( lowerCAmelCase_ ): """simple docstring""" return len(unique_prime_factors(lowerCAmelCase_ ) ) def UpperCAmelCase_ ( lowerCAmelCase_ ): """simple docstring""" return len(set(lowerCAmelCase_ ) ) in (0, 1) def UpperCAmelCase_ ( lowerCAmelCase_ ): """simple docstring""" lowercase = 2 while True: # Increment each value of a generated range lowercase = [base + i for i in range(lowerCAmelCase_ )] # Run elements through out unique_prime_factors function # Append our target number to the end. lowercase = [upf_len(lowerCAmelCase_ ) for x in group] checker.append(lowerCAmelCase_ ) # If all numbers in the list are equal, return the group variable. if equality(lowerCAmelCase_ ): return group # Increment our base variable by 1 base += 1 def UpperCAmelCase_ ( lowerCAmelCase_ = 4 ): """simple docstring""" lowercase = run(lowerCAmelCase_ ) return results[0] if len(lowerCAmelCase_ ) else None if __name__ == "__main__": print(solution())
459
0
"""simple docstring""" def UpperCAmelCase_ ( __a : float , __a : float , __a : int ): '''simple docstring''' if principal <= 0: raise Exception('Principal borrowed must be > 0' ) if rate_per_annum < 0: raise Exception('Rate of interest must be >= 0' ) if years_to_repay <= 0 or not isinstance(__a , __a ): raise Exception('Years to repay must be an integer > 0' ) # Yearly rate is divided by 12 to get monthly rate _lowerCamelCase : Tuple = rate_per_annum / 12 # Years to repay is multiplied by 12 to get number of payments as payment is monthly _lowerCamelCase : List[str] = years_to_repay * 12 return ( principal * rate_per_month * (1 + rate_per_month) ** number_of_payments / ((1 + rate_per_month) ** number_of_payments - 1) ) if __name__ == "__main__": import doctest doctest.testmod()
437
"""simple docstring""" from typing import List, Optional, TypeVar from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets from .dataset_dict import DatasetDict, IterableDatasetDict from .info import DatasetInfo from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets from .splits import NamedSplit from .utils import logging from .utils.py_utils import Literal a_ = logging.get_logger(__name__) a_ = TypeVar("""DatasetType""", Dataset, IterableDataset) def UpperCAmelCase_ ( __a : List[DatasetType] , __a : Optional[List[float]] = None , __a : Optional[int] = None , __a : Optional[DatasetInfo] = None , __a : Optional[NamedSplit] = None , __a : Literal["first_exhausted", "all_exhausted"] = "first_exhausted" , ): '''simple docstring''' from .arrow_dataset import Dataset from .iterable_dataset import IterableDataset if not datasets: raise ValueError('Unable to interleave an empty list of datasets.' ) for i, dataset in enumerate(__a ): if not isinstance(__a , (Dataset, IterableDataset) ): if isinstance(__a , (DatasetDict, IterableDatasetDict) ): if not dataset: raise ValueError( f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} " 'is an empty dataset dictionary.' ) raise ValueError( f"Dataset at position {i} has at least one split: {list(__a )}\n" f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(__a ) )}']" ) raise ValueError( f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(__a ).__name__}." ) if i == 0: _lowerCamelCase , _lowerCamelCase : Tuple = ( (Dataset, IterableDataset) if isinstance(__a , __a ) else (IterableDataset, Dataset) ) elif not isinstance(__a , __a ): raise ValueError( f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects." ) if stopping_strategy not in ["first_exhausted", "all_exhausted"]: raise ValueError(f"{stopping_strategy} is not supported. Please enter a valid stopping_strategy." ) if dataset_type is Dataset: return _interleave_map_style_datasets( __a , __a , __a , info=__a , split=__a , stopping_strategy=__a ) else: return _interleave_iterable_datasets( __a , __a , __a , info=__a , split=__a , stopping_strategy=__a ) def UpperCAmelCase_ ( __a : List[DatasetType] , __a : Optional[DatasetInfo] = None , __a : Optional[NamedSplit] = None , __a : int = 0 , ): '''simple docstring''' if not dsets: raise ValueError('Unable to concatenate an empty list of datasets.' ) for i, dataset in enumerate(__a ): if not isinstance(__a , (Dataset, IterableDataset) ): if isinstance(__a , (DatasetDict, IterableDatasetDict) ): if not dataset: raise ValueError( f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} " 'is an empty dataset dictionary.' ) raise ValueError( f"Dataset at position {i} has at least one split: {list(__a )}\n" f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(__a ) )}']" ) raise ValueError( f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(__a ).__name__}." ) if i == 0: _lowerCamelCase , _lowerCamelCase : Optional[Any] = ( (Dataset, IterableDataset) if isinstance(__a , __a ) else (IterableDataset, Dataset) ) elif not isinstance(__a , __a ): raise ValueError( f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects." ) if dataset_type is Dataset: return _concatenate_map_style_datasets(__a , info=__a , split=__a , axis=__a ) else: return _concatenate_iterable_datasets(__a , info=__a , split=__a , axis=__a )
437
1
import argparse import torch from transformers import ( UniSpeechSatConfig, UniSpeechSatForAudioFrameClassification, UniSpeechSatForSequenceClassification, UniSpeechSatForXVector, WavaVecaFeatureExtractor, logging, ) logging.set_verbosity_info() a_ : List[str] = logging.get_logger(__name__) def __a ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): a__ = UniSpeechSatForSequenceClassification.from_pretrained(__UpperCAmelCase , config=__UpperCAmelCase ) a__ = downstream_dict['''projector.weight'''] a__ = downstream_dict['''projector.bias'''] a__ = downstream_dict['''model.post_net.linear.weight'''] a__ = downstream_dict['''model.post_net.linear.bias'''] return model def __a ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): a__ = UniSpeechSatForAudioFrameClassification.from_pretrained(__UpperCAmelCase , config=__UpperCAmelCase ) a__ = downstream_dict['''model.linear.weight'''] a__ = downstream_dict['''model.linear.bias'''] return model def __a ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): a__ = UniSpeechSatForXVector.from_pretrained(__UpperCAmelCase , config=__UpperCAmelCase ) a__ = downstream_dict['''connector.weight'''] a__ = downstream_dict['''connector.bias'''] for i, kernel_size in enumerate(hf_config.tdnn_kernel ): a__ = downstream_dict[ f"model.framelevel_feature_extractor.module.{i}.kernel.weight" ] a__ = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"] a__ = downstream_dict['''model.utterancelevel_feature_extractor.linear1.weight'''] a__ = downstream_dict['''model.utterancelevel_feature_extractor.linear1.bias'''] a__ = downstream_dict['''model.utterancelevel_feature_extractor.linear2.weight'''] a__ = downstream_dict['''model.utterancelevel_feature_extractor.linear2.bias'''] a__ = downstream_dict['''objective.W'''] return model @torch.no_grad() def __a ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): a__ = torch.load(__UpperCAmelCase , map_location='''cpu''' ) a__ = checkpoint['''Downstream'''] a__ = UniSpeechSatConfig.from_pretrained(__UpperCAmelCase ) a__ = WavaVecaFeatureExtractor.from_pretrained( __UpperCAmelCase , return_attention_mask=__UpperCAmelCase , do_normalize=__UpperCAmelCase ) a__ = hf_config.architectures[0] if arch.endswith('''ForSequenceClassification''' ): a__ = convert_classification(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) elif arch.endswith('''ForAudioFrameClassification''' ): a__ = convert_diarization(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) elif arch.endswith('''ForXVector''' ): a__ = convert_xvector(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) else: raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}" ) if hf_config.use_weighted_layer_sum: a__ = checkpoint['''Featurizer''']['''weights'''] hf_feature_extractor.save_pretrained(__UpperCAmelCase ) hf_model.save_pretrained(__UpperCAmelCase ) if __name__ == "__main__": a_ : List[str] = argparse.ArgumentParser() parser.add_argument( '--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.' ) parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.') parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.') a_ : List[Any] = parser.parse_args() convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
148
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging a_ : List[str] = logging.get_logger(__name__) a_ : Tuple = { 'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/config.json', 'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/config.json', 'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/config.json', 'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/config.json', 'bert-base-multilingual-uncased': 'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json', 'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json', 'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/config.json', 'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/config.json', 'bert-large-uncased-whole-word-masking': ( 'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json' ), 'bert-large-cased-whole-word-masking': ( 'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json' ), 'bert-large-uncased-whole-word-masking-finetuned-squad': ( 'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json' ), 'bert-large-cased-whole-word-masking-finetuned-squad': ( 'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json' ), 'bert-base-cased-finetuned-mrpc': 'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json', 'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json', 'bert-base-german-dbmdz-uncased': 'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json', 'cl-tohoku/bert-base-japanese': 'https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json', 'cl-tohoku/bert-base-japanese-whole-word-masking': ( 'https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json' ), 'cl-tohoku/bert-base-japanese-char': ( 'https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json' ), 'cl-tohoku/bert-base-japanese-char-whole-word-masking': ( 'https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json' ), 'TurkuNLP/bert-base-finnish-cased-v1': ( 'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json' ), 'TurkuNLP/bert-base-finnish-uncased-v1': ( 'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json' ), 'wietsedv/bert-base-dutch-cased': 'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json', # See all BERT models at https://huggingface.co/models?filter=bert } class __UpperCamelCase ( _lowercase ): """simple docstring""" _lowercase : List[Any] = '''bert''' def __init__( self , SCREAMING_SNAKE_CASE=3_0_5_2_2 , SCREAMING_SNAKE_CASE=7_6_8 , SCREAMING_SNAKE_CASE=1_2 , SCREAMING_SNAKE_CASE=1_2 , SCREAMING_SNAKE_CASE=3_0_7_2 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=5_1_2 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=1e-1_2 , SCREAMING_SNAKE_CASE=0 , SCREAMING_SNAKE_CASE="absolute" , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=None , **SCREAMING_SNAKE_CASE , ) -> Any: super().__init__(pad_token_id=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) a__ = vocab_size a__ = hidden_size a__ = num_hidden_layers a__ = num_attention_heads a__ = hidden_act a__ = intermediate_size a__ = hidden_dropout_prob a__ = attention_probs_dropout_prob a__ = max_position_embeddings a__ = type_vocab_size a__ = initializer_range a__ = layer_norm_eps a__ = position_embedding_type a__ = use_cache a__ = classifier_dropout class __UpperCamelCase ( _lowercase ): """simple docstring""" @property def _UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": a__ = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: a__ = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ('''token_type_ids''', dynamic_axis), ] )
148
1
import io import json import unittest from parameterized import parameterized from transformers import FSMTForConditionalGeneration, FSMTTokenizer from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device from utils import calculate_bleu __UpperCAmelCase : Optional[int] = get_tests_dir() + '/test_data/fsmt/fsmt_val_data.json' with io.open(filename, 'r', encoding='utf-8') as f: __UpperCAmelCase : int = json.load(f) @require_torch class lowerCamelCase ( unittest.TestCase ): def snake_case_ ( self : str , __snake_case : Optional[int] ) -> List[Any]: return FSMTTokenizer.from_pretrained(__snake_case ) def snake_case_ ( self : List[Any] , __snake_case : str ) -> Union[str, Any]: _a : str = FSMTForConditionalGeneration.from_pretrained(__snake_case ).to(__snake_case ) if torch_device == "cuda": model.half() return model @parameterized.expand( [ ['''en-ru''', 26.0], ['''ru-en''', 22.0], ['''en-de''', 22.0], ['''de-en''', 29.0], ] ) @slow def snake_case_ ( self : Optional[int] , __snake_case : int , __snake_case : int ) -> int: # note: this test is not testing the best performance since it only evals a small batch # but it should be enough to detect a regression in the output quality _a : int = f"""facebook/wmt19-{pair}""" _a : List[str] = self.get_tokenizer(__snake_case ) _a : Any = self.get_model(__snake_case ) _a : Any = bleu_data[pair]['''src'''] _a : List[Any] = bleu_data[pair]['''tgt'''] _a : Optional[Any] = tokenizer(__snake_case , return_tensors='''pt''' , truncation=__snake_case , padding='''longest''' ).to(__snake_case ) _a : int = model.generate( input_ids=batch.input_ids , num_beams=8 , ) _a : Optional[int] = tokenizer.batch_decode( __snake_case , skip_special_tokens=__snake_case , clean_up_tokenization_spaces=__snake_case ) _a : int = calculate_bleu(__snake_case , __snake_case ) print(__snake_case ) self.assertGreaterEqual(scores['''bleu'''] , __snake_case )
471
from math import sqrt def lowerCamelCase_ ( UpperCamelCase_ = 100_0000 ): _a : int = 0 _a : int = 0 _a : int while num_cuboids <= limit: max_cuboid_size += 1 for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ): if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer(): num_cuboids += ( min(UpperCamelCase_ , sum_shortest_sides // 2 ) - max(1 , sum_shortest_sides - max_cuboid_size ) + 1 ) return max_cuboid_size if __name__ == "__main__": print(f'''{solution() = }''')
471
1
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available _snake_case = { '''configuration_m2m_100''': ['''M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''M2M100Config''', '''M2M100OnnxConfig'''], '''tokenization_m2m_100''': ['''M2M100Tokenizer'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case = [ '''M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST''', '''M2M100ForConditionalGeneration''', '''M2M100Model''', '''M2M100PreTrainedModel''', ] if TYPE_CHECKING: from .configuration_mam_aaa import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, MaMaaaConfig, MaMaaaOnnxConfig from .tokenization_mam_aaa import MaMaaaTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mam_aaa import ( M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST, MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaPreTrainedModel, ) else: import sys _snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
231
import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from transformers.activations import gelu_new, gelu_python, get_activation @require_torch class UpperCAmelCase_ ( unittest.TestCase ): '''simple docstring''' def _snake_case ( self ): """simple docstring""" lowerCamelCase : Union[str, Any] = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100] ) lowerCamelCase : int = get_activation("gelu" ) self.assertTrue(torch.allclose(gelu_python(__A ) , torch_builtin(__A ) ) ) self.assertFalse(torch.allclose(gelu_python(__A ) , gelu_new(__A ) ) ) def _snake_case ( self ): """simple docstring""" lowerCamelCase : str = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100] ) lowerCamelCase : Optional[int] = get_activation("gelu" ) lowerCamelCase : List[str] = get_activation("gelu_10" ) lowerCamelCase : str = torch_builtin(__A ) lowerCamelCase : Any = geluaa(__A ) lowerCamelCase : Tuple = torch.where(y_gelu_aa < 10.0 , 1 , 0 ) self.assertTrue(torch.max(__A ).item() == 10.0 ) self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_aa * clipped_mask ) ) def _snake_case ( self ): """simple docstring""" get_activation("gelu" ) get_activation("gelu_10" ) get_activation("gelu_fast" ) get_activation("gelu_new" ) get_activation("gelu_python" ) get_activation("gelu_pytorch_tanh" ) get_activation("linear" ) get_activation("mish" ) get_activation("quick_gelu" ) get_activation("relu" ) get_activation("sigmoid" ) get_activation("silu" ) get_activation("swish" ) get_activation("tanh" ) with self.assertRaises(__A ): get_activation("bogus" ) with self.assertRaises(__A ): get_activation(__A ) def _snake_case ( self ): """simple docstring""" lowerCamelCase : List[Any] = get_activation("gelu" ) lowerCamelCase : Union[str, Any] = 1 lowerCamelCase : str = get_activation("gelu" ) self.assertEqual(acta.a , 1 ) with self.assertRaises(__A ): lowerCamelCase : Union[str, Any] = acta.a
231
1
import os import time from dataclasses import dataclass, field from enum import Enum from typing import Dict, List, Optional, Union import torch from filelock import FileLock from torch.utils.data import Dataset from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging from ..processors.squad import SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE_ = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys()) SCREAMING_SNAKE_CASE_ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class a : _lowercase = field( default=UpperCAmelCase , metadata={"help": "Model type selected in the list: " + ", ".join(UpperCAmelCase )} ) _lowercase = field( default=UpperCAmelCase , metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."} ) _lowercase = field( default=1_2_8 , metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) _lowercase = field( default=1_2_8 , metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."} , ) _lowercase = field( default=6_4 , metadata={ "help": ( "The maximum number of tokens for the question. Questions longer than this will " "be truncated to this length." ) } , ) _lowercase = field( default=3_0 , metadata={ "help": ( "The maximum length of an answer that can be generated. This is needed because the start " "and end predictions are not conditioned on one another." ) } , ) _lowercase = field( default=UpperCAmelCase , metadata={"help": "Overwrite the cached training and evaluation sets"} ) _lowercase = field( default=UpperCAmelCase , metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."} ) _lowercase = field( default=0.0 , metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."} ) _lowercase = field( default=2_0 , metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."} ) _lowercase = field( default=0 , metadata={ "help": ( "language id of input for language-specific xlm models (see" " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)" ) } , ) _lowercase = field(default=1 , metadata={"help": "multiple threads for converting example to features"} ) class a ( UpperCAmelCase ): _lowercase = "train" _lowercase = "dev" class a ( UpperCAmelCase ): _lowercase = 42 _lowercase = 42 _lowercase = 42 _lowercase = 42 def __init__( self , A_ , A_ , A_ = None , A_ = Split.train , A_ = False , A_ = None , A_ = "pt" , ): '''simple docstring''' _UpperCAmelCase : Tuple = args _UpperCAmelCase : List[str] = is_language_sensitive _UpperCAmelCase : List[str] = SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor() if isinstance(A_ , A_ ): try: _UpperCAmelCase : Any = Split[mode] except KeyError: raise KeyError("mode is not a valid split name" ) _UpperCAmelCase : List[Any] = mode # Load data features from cache or dataset file _UpperCAmelCase : Optional[Any] = "v2" if args.version_2_with_negative else "v1" _UpperCAmelCase : Any = os.path.join( cache_dir if cache_dir is not None else args.data_dir , f'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}' , ) # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. _UpperCAmelCase : List[str] = cached_features_file + ".lock" with FileLock(A_ ): if os.path.exists(A_ ) and not args.overwrite_cache: _UpperCAmelCase : Tuple = time.time() _UpperCAmelCase : Optional[int] = torch.load(A_ ) # Legacy cache files have only features, while new cache files # will have dataset and examples also. _UpperCAmelCase : Optional[Any] = self.old_features["features"] _UpperCAmelCase : Any = self.old_features.get("dataset" , A_ ) _UpperCAmelCase : List[str] = self.old_features.get("examples" , A_ ) logger.info( f'Loading features from cached file {cached_features_file} [took %.3f s]' , time.time() - start ) if self.dataset is None or self.examples is None: logger.warning( f'Deleting cached file {cached_features_file} will allow dataset and examples to be cached in' " future run" ) else: if mode == Split.dev: _UpperCAmelCase : int = self.processor.get_dev_examples(args.data_dir ) else: _UpperCAmelCase : Dict = self.processor.get_train_examples(args.data_dir ) _UpperCAmelCase , _UpperCAmelCase : List[Any] = squad_convert_examples_to_features( examples=self.examples , tokenizer=A_ , max_seq_length=args.max_seq_length , doc_stride=args.doc_stride , max_query_length=args.max_query_length , is_training=mode == Split.train , threads=args.threads , return_dataset=A_ , ) _UpperCAmelCase : List[str] = time.time() torch.save( {"features": self.features, "dataset": self.dataset, "examples": self.examples} , A_ , ) # ^ This seems to take a lot of time so I want to investigate why and how we can improve. logger.info( f'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' ) def __len__( self ): '''simple docstring''' return len(self.features ) def __getitem__( self , A_ ): '''simple docstring''' _UpperCAmelCase : List[str] = self.features[i] _UpperCAmelCase : Any = torch.tensor(feature.input_ids , dtype=torch.long ) _UpperCAmelCase : Any = torch.tensor(feature.attention_mask , dtype=torch.long ) _UpperCAmelCase : str = torch.tensor(feature.token_type_ids , dtype=torch.long ) _UpperCAmelCase : Tuple = torch.tensor(feature.cls_index , dtype=torch.long ) _UpperCAmelCase : Union[str, Any] = torch.tensor(feature.p_mask , dtype=torch.float ) _UpperCAmelCase : Dict = torch.tensor(feature.is_impossible , dtype=torch.float ) _UpperCAmelCase : Optional[Any] = { "input_ids": input_ids, "attention_mask": attention_mask, "token_type_ids": token_type_ids, } if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]: del inputs["token_type_ids"] if self.args.model_type in ["xlnet", "xlm"]: inputs.update({"cls_index": cls_index, "p_mask": p_mask} ) if self.args.version_2_with_negative: inputs.update({"is_impossible": is_impossible} ) if self.is_language_sensitive: inputs.update({"langs": (torch.ones(input_ids.shape , dtype=torch.intaa ) * self.args.lang_id)} ) if self.mode == Split.train: _UpperCAmelCase : List[Any] = torch.tensor(feature.start_position , dtype=torch.long ) _UpperCAmelCase : Dict = torch.tensor(feature.end_position , dtype=torch.long ) inputs.update({"start_positions": start_positions, "end_positions": end_positions} ) return inputs
300
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: int = 100_0000 ) -> int: _UpperCAmelCase : str = set(range(3 , lowerCAmelCase , 2 ) ) primes.add(2 ) for p in range(3 , lowerCAmelCase , 2 ): if p not in primes: continue primes.difference_update(set(range(p * p , lowerCAmelCase , lowerCAmelCase ) ) ) _UpperCAmelCase : Tuple = [float(lowerCAmelCase ) for n in range(limit + 1 )] for p in primes: for n in range(lowerCAmelCase , limit + 1 , lowerCAmelCase ): phi[n] *= 1 - 1 / p return int(sum(phi[2:] ) ) if __name__ == "__main__": print(F'''{solution() = }''')
300
1
'''simple docstring''' import pprint import requests _lowerCamelCase = 'https://zenquotes.io/api' def _SCREAMING_SNAKE_CASE ( ): return requests.get(API_ENDPOINT_URL + """/today""" ).json() def _SCREAMING_SNAKE_CASE ( ): return requests.get(API_ENDPOINT_URL + """/random""" ).json() if __name__ == "__main__": _lowerCamelCase = random_quotes() pprint.pprint(response)
572
'''simple docstring''' from __future__ import annotations from typing import Any class __a : def __init__( self : Optional[Any] , lowercase__ : int) ->None: """simple docstring""" _lowercase = num_of_nodes _lowercase = [] _lowercase = {} def _UpperCAmelCase ( self : Optional[int] , lowercase__ : int , lowercase__ : int , lowercase__ : int) ->None: """simple docstring""" self.m_edges.append([u_node, v_node, weight]) def _UpperCAmelCase ( self : Any , lowercase__ : int) ->int: """simple docstring""" if self.m_component[u_node] == u_node: return u_node return self.find_component(self.m_component[u_node]) def _UpperCAmelCase ( self : List[str] , lowercase__ : int) ->None: """simple docstring""" if self.m_component[u_node] != u_node: for k in self.m_component: _lowercase = self.find_component(lowercase__) def _UpperCAmelCase ( self : Dict , lowercase__ : list[int] , lowercase__ : int , lowercase__ : int) ->None: """simple docstring""" if component_size[u_node] <= component_size[v_node]: _lowercase = v_node component_size[v_node] += component_size[u_node] self.set_component(lowercase__) elif component_size[u_node] >= component_size[v_node]: _lowercase = self.find_component(lowercase__) component_size[u_node] += component_size[v_node] self.set_component(lowercase__) def _UpperCAmelCase ( self : Optional[int]) ->None: """simple docstring""" _lowercase = [] _lowercase = 0 _lowercase = [-1] * self.m_num_of_nodes # A list of components (initialized to all of the nodes) for node in range(self.m_num_of_nodes): self.m_component.update({node: node}) component_size.append(1) _lowercase = self.m_num_of_nodes while num_of_components > 1: for edge in self.m_edges: _lowercase , _lowercase , _lowercase = edge _lowercase = self.m_component[u] _lowercase = self.m_component[v] if u_component != v_component: for component in (u_component, v_component): if ( minimum_weight_edge[component] == -1 or minimum_weight_edge[component][2] > w ): _lowercase = [u, v, w] for edge in minimum_weight_edge: if isinstance(lowercase__ , lowercase__): _lowercase , _lowercase , _lowercase = edge _lowercase = self.m_component[u] _lowercase = self.m_component[v] if u_component != v_component: mst_weight += w self.union(lowercase__ , lowercase__ , lowercase__) print(f"""Added edge [{u} - {v}]\nAdded weight: {w}\n""") num_of_components -= 1 _lowercase = [-1] * self.m_num_of_nodes print(f"""The total weight of the minimal spanning tree is: {mst_weight}""") def _SCREAMING_SNAKE_CASE ( ): pass if __name__ == "__main__": import doctest doctest.testmod()
572
1
from typing import Any class __magic_name__ : def __init__( self : List[Any] ,__SCREAMING_SNAKE_CASE : Any ): UpperCAmelCase = data UpperCAmelCase = None def __repr__( self : Any ): return f'''Node({self.data})''' class __magic_name__ : def __init__( self : List[str] ): UpperCAmelCase = None def __iter__( self : int ): UpperCAmelCase = self.head while node: yield node.data UpperCAmelCase = node.next def __len__( self : Dict ): return sum(1 for _ in self ) def __repr__( self : Optional[int] ): return "->".join([str(__SCREAMING_SNAKE_CASE ) for item in self] ) def __getitem__( self : str ,__SCREAMING_SNAKE_CASE : int ): if not 0 <= index < len(self ): raise ValueError("list index out of range." ) for i, node in enumerate(self ): if i == index: return node return None def __setitem__( self : int ,__SCREAMING_SNAKE_CASE : int ,__SCREAMING_SNAKE_CASE : Any ): if not 0 <= index < len(self ): raise ValueError("list index out of range." ) UpperCAmelCase = self.head for _ in range(__SCREAMING_SNAKE_CASE ): UpperCAmelCase = current.next UpperCAmelCase = data def _UpperCAmelCase ( self : str ,__SCREAMING_SNAKE_CASE : Any ): self.insert_nth(len(self ) ,__SCREAMING_SNAKE_CASE ) def _UpperCAmelCase ( self : str ,__SCREAMING_SNAKE_CASE : Any ): self.insert_nth(0 ,__SCREAMING_SNAKE_CASE ) def _UpperCAmelCase ( self : Dict ,__SCREAMING_SNAKE_CASE : int ,__SCREAMING_SNAKE_CASE : Any ): if not 0 <= index <= len(self ): raise IndexError("list index out of range" ) UpperCAmelCase = Node(__SCREAMING_SNAKE_CASE ) if self.head is None: UpperCAmelCase = new_node elif index == 0: UpperCAmelCase = self.head # link new_node to head UpperCAmelCase = new_node else: UpperCAmelCase = self.head for _ in range(index - 1 ): UpperCAmelCase = temp.next UpperCAmelCase = temp.next UpperCAmelCase = new_node def _UpperCAmelCase ( self : Tuple ): # print every node data print(self ) def _UpperCAmelCase ( self : Optional[Any] ): return self.delete_nth(0 ) def _UpperCAmelCase ( self : Any ): # delete from tail return self.delete_nth(len(self ) - 1 ) def _UpperCAmelCase ( self : str ,__SCREAMING_SNAKE_CASE : int = 0 ): if not 0 <= index <= len(self ) - 1: # test if index is valid raise IndexError("List index out of range." ) UpperCAmelCase = self.head # default first node if index == 0: UpperCAmelCase = self.head.next else: UpperCAmelCase = self.head for _ in range(index - 1 ): UpperCAmelCase = temp.next UpperCAmelCase = temp.next UpperCAmelCase = temp.next.next return delete_node.data def _UpperCAmelCase ( self : Tuple ): return self.head is None def _UpperCAmelCase ( self : Union[str, Any] ): UpperCAmelCase = None UpperCAmelCase = self.head while current: # Store the current node's next node. UpperCAmelCase = current.next # Make the current node's next point backwards UpperCAmelCase = prev # Make the previous node be the current node UpperCAmelCase = current # Make the current node the next node (to progress iteration) UpperCAmelCase = next_node # Return prev in order to put the head at the end UpperCAmelCase = prev def __UpperCamelCase ( ): """simple docstring""" UpperCAmelCase = LinkedList() assert linked_list.is_empty() is True assert str(_lowerCAmelCase ) == "" try: linked_list.delete_head() raise AssertionError # This should not happen. except IndexError: assert True # This should happen. try: linked_list.delete_tail() raise AssertionError # This should not happen. except IndexError: assert True # This should happen. for i in range(10 ): assert len(_lowerCAmelCase ) == i linked_list.insert_nth(_lowerCAmelCase , i + 1 ) assert str(_lowerCAmelCase ) == "->".join(str(_lowerCAmelCase ) for i in range(1 , 11 ) ) linked_list.insert_head(0 ) linked_list.insert_tail(11 ) assert str(_lowerCAmelCase ) == "->".join(str(_lowerCAmelCase ) for i in range(0 , 12 ) ) assert linked_list.delete_head() == 0 assert linked_list.delete_nth(9 ) == 10 assert linked_list.delete_tail() == 11 assert len(_lowerCAmelCase ) == 9 assert str(_lowerCAmelCase ) == "->".join(str(_lowerCAmelCase ) for i in range(1 , 10 ) ) assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True for i in range(0 , 9 ): UpperCAmelCase = -i assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True linked_list.reverse() assert str(_lowerCAmelCase ) == "->".join(str(_lowerCAmelCase ) for i in range(-8 , 1 ) ) def __UpperCamelCase ( ): """simple docstring""" UpperCAmelCase = [ -9, 1_00, Node(77_34_51_12 ), "dlrow olleH", 7, 55_55, 0, -192.5_5555, "Hello, world!", 77.9, Node(10 ), None, None, 12.20, ] UpperCAmelCase = LinkedList() for i in test_input: linked_list.insert_tail(_lowerCAmelCase ) # Check if it's empty or not assert linked_list.is_empty() is False assert ( str(_lowerCAmelCase ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->" "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2" ) # Delete the head UpperCAmelCase = linked_list.delete_head() assert result == -9 assert ( str(_lowerCAmelCase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" "Hello, world!->77.9->Node(10)->None->None->12.2" ) # Delete the tail UpperCAmelCase = linked_list.delete_tail() assert result == 12.2 assert ( str(_lowerCAmelCase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" "Hello, world!->77.9->Node(10)->None->None" ) # Delete a node in specific location in linked list UpperCAmelCase = linked_list.delete_nth(10 ) assert result is None assert ( str(_lowerCAmelCase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" "Hello, world!->77.9->Node(10)->None" ) # Add a Node instance to its head linked_list.insert_head(Node("Hello again, world!" ) ) assert ( str(_lowerCAmelCase ) == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->" "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None" ) # Add None to its tail linked_list.insert_tail(_lowerCAmelCase ) assert ( str(_lowerCAmelCase ) == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->" "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None" ) # Reverse the linked list linked_list.reverse() assert ( str(_lowerCAmelCase ) == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->" "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)" ) def __UpperCamelCase ( ): """simple docstring""" from doctest import testmod testmod() UpperCAmelCase = LinkedList() linked_list.insert_head(input("Inserting 1st at head " ).strip() ) linked_list.insert_head(input("Inserting 2nd at head " ).strip() ) print("\nPrint list:" ) linked_list.print_list() linked_list.insert_tail(input("\nInserting 1st at tail " ).strip() ) linked_list.insert_tail(input("Inserting 2nd at tail " ).strip() ) print("\nPrint list:" ) linked_list.print_list() print("\nDelete head" ) linked_list.delete_head() print("Delete tail" ) linked_list.delete_tail() print("\nPrint list:" ) linked_list.print_list() print("\nReverse linked list" ) linked_list.reverse() print("\nPrint list:" ) linked_list.print_list() print("\nString representation of linked list:" ) print(_lowerCAmelCase ) print("\nReading/changing Node data using indexing:" ) print(F'''Element at Position 1: {linked_list[1]}''' ) UpperCAmelCase = input("Enter New Value: " ).strip() print("New list:" ) print(_lowerCAmelCase ) print(F'''length of linked_list is : {len(_lowerCAmelCase )}''' ) if __name__ == "__main__": main()
333
import inspect import unittest from transformers import MobileViTVaConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel from transformers.models.mobilevitva.modeling_mobilevitva import ( MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST, make_divisible, ) if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class __magic_name__ ( _a): def _UpperCAmelCase ( self : Tuple ): UpperCAmelCase = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(__SCREAMING_SNAKE_CASE ,"width_multiplier" ) ) class __magic_name__ : def __init__( self : List[Any] ,__SCREAMING_SNAKE_CASE : List[Any] ,__SCREAMING_SNAKE_CASE : Optional[Any]=1_3 ,__SCREAMING_SNAKE_CASE : Optional[int]=6_4 ,__SCREAMING_SNAKE_CASE : Dict=2 ,__SCREAMING_SNAKE_CASE : List[str]=3 ,__SCREAMING_SNAKE_CASE : int="swish" ,__SCREAMING_SNAKE_CASE : str=3 ,__SCREAMING_SNAKE_CASE : Optional[int]=3_2 ,__SCREAMING_SNAKE_CASE : Union[str, Any]=0.1 ,__SCREAMING_SNAKE_CASE : Optional[int]=0.02 ,__SCREAMING_SNAKE_CASE : Optional[int]=True ,__SCREAMING_SNAKE_CASE : Union[str, Any]=True ,__SCREAMING_SNAKE_CASE : str=1_0 ,__SCREAMING_SNAKE_CASE : Union[str, Any]=None ,__SCREAMING_SNAKE_CASE : int=0.25 ,__SCREAMING_SNAKE_CASE : Tuple=0.0 ,__SCREAMING_SNAKE_CASE : Optional[int]=0.0 ,): UpperCAmelCase = parent UpperCAmelCase = batch_size UpperCAmelCase = image_size UpperCAmelCase = patch_size UpperCAmelCase = num_channels UpperCAmelCase = make_divisible(5_1_2 * width_multiplier ,divisor=8 ) UpperCAmelCase = hidden_act UpperCAmelCase = conv_kernel_size UpperCAmelCase = output_stride UpperCAmelCase = classifier_dropout_prob UpperCAmelCase = use_labels UpperCAmelCase = is_training UpperCAmelCase = num_labels UpperCAmelCase = initializer_range UpperCAmelCase = scope UpperCAmelCase = width_multiplier UpperCAmelCase = ffn_dropout UpperCAmelCase = attn_dropout def _UpperCAmelCase ( self : List[Any] ): UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCAmelCase = None UpperCAmelCase = None if self.use_labels: UpperCAmelCase = ids_tensor([self.batch_size] ,self.num_labels ) UpperCAmelCase = ids_tensor([self.batch_size, self.image_size, self.image_size] ,self.num_labels ) UpperCAmelCase = self.get_config() return config, pixel_values, labels, pixel_labels def _UpperCAmelCase ( self : Dict ): return MobileViTVaConfig( image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_act=self.hidden_act ,conv_kernel_size=self.conv_kernel_size ,output_stride=self.output_stride ,classifier_dropout_prob=self.classifier_dropout_prob ,initializer_range=self.initializer_range ,width_multiplier=self.width_multiplier ,ffn_dropout=self.ffn_dropout_prob ,attn_dropout=self.attn_dropout_prob ,) def _UpperCAmelCase ( self : List[Any] ,__SCREAMING_SNAKE_CASE : int ,__SCREAMING_SNAKE_CASE : List[str] ,__SCREAMING_SNAKE_CASE : Any ,__SCREAMING_SNAKE_CASE : int ): UpperCAmelCase = MobileViTVaModel(config=__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() UpperCAmelCase = model(__SCREAMING_SNAKE_CASE ) self.parent.assertEqual( result.last_hidden_state.shape ,( self.batch_size, self.last_hidden_size, self.image_size // self.output_stride, self.image_size // self.output_stride, ) ,) def _UpperCAmelCase ( self : Optional[int] ,__SCREAMING_SNAKE_CASE : str ,__SCREAMING_SNAKE_CASE : Dict ,__SCREAMING_SNAKE_CASE : List[str] ,__SCREAMING_SNAKE_CASE : Tuple ): UpperCAmelCase = self.num_labels UpperCAmelCase = MobileViTVaForImageClassification(__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() UpperCAmelCase = model(__SCREAMING_SNAKE_CASE ,labels=__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) ) def _UpperCAmelCase ( self : List[Any] ,__SCREAMING_SNAKE_CASE : Dict ,__SCREAMING_SNAKE_CASE : Any ,__SCREAMING_SNAKE_CASE : Optional[Any] ,__SCREAMING_SNAKE_CASE : Optional[int] ): UpperCAmelCase = self.num_labels UpperCAmelCase = MobileViTVaForSemanticSegmentation(__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() UpperCAmelCase = model(__SCREAMING_SNAKE_CASE ) self.parent.assertEqual( result.logits.shape ,( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) ,) UpperCAmelCase = model(__SCREAMING_SNAKE_CASE ,labels=__SCREAMING_SNAKE_CASE ) self.parent.assertEqual( result.logits.shape ,( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) ,) def _UpperCAmelCase ( self : List[str] ): UpperCAmelCase = self.prepare_config_and_inputs() UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = config_and_inputs UpperCAmelCase = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class __magic_name__ ( _a , _a , unittest.TestCase): _UpperCAmelCase : Tuple = ( (MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation) if is_torch_available() else () ) _UpperCAmelCase : List[Any] = ( { 'feature-extraction': MobileViTVaModel, 'image-classification': MobileViTVaForImageClassification, 'image-segmentation': MobileViTVaForSemanticSegmentation, } if is_torch_available() else {} ) _UpperCAmelCase : Union[str, Any] = False _UpperCAmelCase : Union[str, Any] = False _UpperCAmelCase : int = False _UpperCAmelCase : Any = False def _UpperCAmelCase ( self : Tuple ): UpperCAmelCase = MobileViTVaModelTester(self ) UpperCAmelCase = MobileViTVaConfigTester(self ,config_class=__SCREAMING_SNAKE_CASE ,has_text_modality=__SCREAMING_SNAKE_CASE ) def _UpperCAmelCase ( self : Any ): self.config_tester.run_common_tests() @unittest.skip(reason="MobileViTV2 does not use inputs_embeds" ) def _UpperCAmelCase ( self : Optional[int] ): pass @unittest.skip(reason="MobileViTV2 does not support input and output embeddings" ) def _UpperCAmelCase ( self : Optional[int] ): pass @unittest.skip(reason="MobileViTV2 does not output attentions" ) def _UpperCAmelCase ( self : Any ): pass @require_torch_multi_gpu @unittest.skip(reason="Got `CUDA error: misaligned address` for tests after this one being run." ) def _UpperCAmelCase ( self : Optional[int] ): pass @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." ) def _UpperCAmelCase ( self : Any ): pass def _UpperCAmelCase ( self : str ): UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase = model_class(__SCREAMING_SNAKE_CASE ) UpperCAmelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase = [*signature.parameters.keys()] UpperCAmelCase = ["pixel_values"] self.assertListEqual(arg_names[:1] ,__SCREAMING_SNAKE_CASE ) def _UpperCAmelCase ( self : Any ): UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE ) def _UpperCAmelCase ( self : List[str] ): def check_hidden_states_output(__SCREAMING_SNAKE_CASE : Optional[Any] ,__SCREAMING_SNAKE_CASE : str ,__SCREAMING_SNAKE_CASE : List[str] ): UpperCAmelCase = model_class(__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() with torch.no_grad(): UpperCAmelCase = model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ) ) UpperCAmelCase = outputs.hidden_states UpperCAmelCase = 5 self.assertEqual(len(__SCREAMING_SNAKE_CASE ) ,__SCREAMING_SNAKE_CASE ) # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width) # with the width and height being successively divided by 2. UpperCAmelCase = 2 for i in range(len(__SCREAMING_SNAKE_CASE ) ): self.assertListEqual( list(hidden_states[i].shape[-2:] ) ,[self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] ,) divisor *= 2 self.assertEqual(self.model_tester.output_stride ,divisor // 2 ) UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase = True check_hidden_states_output(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCAmelCase = True check_hidden_states_output(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ) def _UpperCAmelCase ( self : str ): UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__SCREAMING_SNAKE_CASE ) def _UpperCAmelCase ( self : Dict ): UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*__SCREAMING_SNAKE_CASE ) @slow def _UpperCAmelCase ( self : Optional[int] ): for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase = MobileViTVaModel.from_pretrained(__SCREAMING_SNAKE_CASE ) self.assertIsNotNone(__SCREAMING_SNAKE_CASE ) def __UpperCamelCase ( ): """simple docstring""" UpperCAmelCase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class __magic_name__ ( unittest.TestCase): @cached_property def _UpperCAmelCase ( self : Any ): return ( MobileViTImageProcessor.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256" ) if is_vision_available() else None ) @slow def _UpperCAmelCase ( self : Dict ): UpperCAmelCase = MobileViTVaForImageClassification.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256" ).to( __SCREAMING_SNAKE_CASE ) UpperCAmelCase = self.default_image_processor UpperCAmelCase = prepare_img() UpperCAmelCase = image_processor(images=__SCREAMING_SNAKE_CASE ,return_tensors="pt" ).to(__SCREAMING_SNAKE_CASE ) # forward pass with torch.no_grad(): UpperCAmelCase = model(**__SCREAMING_SNAKE_CASE ) # verify the logits UpperCAmelCase = torch.Size((1, 1_0_0_0) ) self.assertEqual(outputs.logits.shape ,__SCREAMING_SNAKE_CASE ) UpperCAmelCase = torch.tensor([-1.63_36e00, -7.32_04e-02, -5.18_83e-01] ).to(__SCREAMING_SNAKE_CASE ) self.assertTrue(torch.allclose(outputs.logits[0, :3] ,__SCREAMING_SNAKE_CASE ,atol=1e-4 ) ) @slow def _UpperCAmelCase ( self : Optional[Any] ): UpperCAmelCase = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3" ) UpperCAmelCase = model.to(__SCREAMING_SNAKE_CASE ) UpperCAmelCase = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3" ) UpperCAmelCase = prepare_img() UpperCAmelCase = image_processor(images=__SCREAMING_SNAKE_CASE ,return_tensors="pt" ).to(__SCREAMING_SNAKE_CASE ) # forward pass with torch.no_grad(): UpperCAmelCase = model(**__SCREAMING_SNAKE_CASE ) UpperCAmelCase = outputs.logits # verify the logits UpperCAmelCase = torch.Size((1, 2_1, 3_2, 3_2) ) self.assertEqual(logits.shape ,__SCREAMING_SNAKE_CASE ) UpperCAmelCase = torch.tensor( [ [[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]], [[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]], [[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]], ] ,device=__SCREAMING_SNAKE_CASE ,) self.assertTrue(torch.allclose(logits[0, :3, :3, :3] ,__SCREAMING_SNAKE_CASE ,atol=1e-4 ) ) @slow def _UpperCAmelCase ( self : List[str] ): UpperCAmelCase = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3" ) UpperCAmelCase = model.to(__SCREAMING_SNAKE_CASE ) UpperCAmelCase = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3" ) UpperCAmelCase = prepare_img() UpperCAmelCase = image_processor(images=__SCREAMING_SNAKE_CASE ,return_tensors="pt" ).to(__SCREAMING_SNAKE_CASE ) # forward pass with torch.no_grad(): UpperCAmelCase = model(**__SCREAMING_SNAKE_CASE ) UpperCAmelCase = outputs.logits.detach().cpu() UpperCAmelCase = image_processor.post_process_semantic_segmentation(outputs=__SCREAMING_SNAKE_CASE ,target_sizes=[(5_0, 6_0)] ) UpperCAmelCase = torch.Size((5_0, 6_0) ) self.assertEqual(segmentation[0].shape ,__SCREAMING_SNAKE_CASE ) UpperCAmelCase = image_processor.post_process_semantic_segmentation(outputs=__SCREAMING_SNAKE_CASE ) UpperCAmelCase = torch.Size((3_2, 3_2) ) self.assertEqual(segmentation[0].shape ,__SCREAMING_SNAKE_CASE )
333
1
"""simple docstring""" def lowercase__(A ) ->bool: """simple docstring""" lowercase__ : Dict= n ** (1 / 3) return (val * val * val) == n if __name__ == "__main__": print(perfect_cube(27)) print(perfect_cube(4))
85
"""simple docstring""" import argparse import os from . import ( ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BART_PRETRAINED_MODEL_ARCHIVE_LIST, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST, ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, T5_PRETRAINED_CONFIG_ARCHIVE_MAP, TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, BartConfig, BertConfig, CamembertConfig, CTRLConfig, DistilBertConfig, DPRConfig, ElectraConfig, FlaubertConfig, GPTaConfig, LayoutLMConfig, LxmertConfig, OpenAIGPTConfig, RobertaConfig, TaConfig, TFAlbertForPreTraining, TFBartForConditionalGeneration, TFBartForSequenceClassification, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFCamembertForMaskedLM, TFCTRLLMHeadModel, TFDistilBertForMaskedLM, TFDistilBertForQuestionAnswering, TFDPRContextEncoder, TFDPRQuestionEncoder, TFDPRReader, TFElectraForPreTraining, TFFlaubertWithLMHeadModel, TFGPTaLMHeadModel, TFLayoutLMForMaskedLM, TFLxmertForPreTraining, TFLxmertVisualFeatureEncoder, TFOpenAIGPTLMHeadModel, TFRobertaForCausalLM, TFRobertaForMaskedLM, TFRobertaForSequenceClassification, TFTaForConditionalGeneration, TFTransfoXLLMHeadModel, TFWavaVecaModel, TFXLMRobertaForMaskedLM, TFXLMWithLMHeadModel, TFXLNetLMHeadModel, TransfoXLConfig, WavaVecaConfig, WavaVecaModel, XLMConfig, XLMRobertaConfig, XLNetConfig, is_torch_available, load_pytorch_checkpoint_in_tfa_model, ) from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging if is_torch_available(): import numpy as np import torch from . import ( AlbertForPreTraining, BartForConditionalGeneration, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, CamembertForMaskedLM, CTRLLMHeadModel, DistilBertForMaskedLM, DistilBertForQuestionAnswering, DPRContextEncoder, DPRQuestionEncoder, DPRReader, ElectraForPreTraining, FlaubertWithLMHeadModel, GPTaLMHeadModel, LayoutLMForMaskedLM, LxmertForPreTraining, LxmertVisualFeatureEncoder, OpenAIGPTLMHeadModel, RobertaForMaskedLM, RobertaForSequenceClassification, TaForConditionalGeneration, TransfoXLLMHeadModel, XLMRobertaForMaskedLM, XLMWithLMHeadModel, XLNetLMHeadModel, ) logging.set_verbosity_info() a : Optional[Any] = { """bart""": ( BartConfig, TFBartForConditionalGeneration, TFBartForSequenceClassification, BartForConditionalGeneration, BART_PRETRAINED_MODEL_ARCHIVE_LIST, ), """bert""": ( BertConfig, TFBertForPreTraining, BertForPreTraining, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """bert-large-uncased-whole-word-masking-finetuned-squad""": ( BertConfig, TFBertForQuestionAnswering, BertForQuestionAnswering, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """bert-large-cased-whole-word-masking-finetuned-squad""": ( BertConfig, TFBertForQuestionAnswering, BertForQuestionAnswering, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """bert-base-cased-finetuned-mrpc""": ( BertConfig, TFBertForSequenceClassification, BertForSequenceClassification, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """dpr""": ( DPRConfig, TFDPRQuestionEncoder, TFDPRContextEncoder, TFDPRReader, DPRQuestionEncoder, DPRContextEncoder, DPRReader, DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST, ), """gpt2""": ( GPTaConfig, TFGPTaLMHeadModel, GPTaLMHeadModel, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """xlnet""": ( XLNetConfig, TFXLNetLMHeadModel, XLNetLMHeadModel, XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """xlm""": ( XLMConfig, TFXLMWithLMHeadModel, XLMWithLMHeadModel, XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """xlm-roberta""": ( XLMRobertaConfig, TFXLMRobertaForMaskedLM, XLMRobertaForMaskedLM, XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """transfo-xl""": ( TransfoXLConfig, TFTransfoXLLMHeadModel, TransfoXLLMHeadModel, TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """openai-gpt""": ( OpenAIGPTConfig, TFOpenAIGPTLMHeadModel, OpenAIGPTLMHeadModel, OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """roberta""": ( RobertaConfig, TFRobertaForCausalLM, TFRobertaForMaskedLM, RobertaForMaskedLM, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """layoutlm""": ( LayoutLMConfig, TFLayoutLMForMaskedLM, LayoutLMForMaskedLM, LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, ), """roberta-large-mnli""": ( RobertaConfig, TFRobertaForSequenceClassification, RobertaForSequenceClassification, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """camembert""": ( CamembertConfig, TFCamembertForMaskedLM, CamembertForMaskedLM, CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """flaubert""": ( FlaubertConfig, TFFlaubertWithLMHeadModel, FlaubertWithLMHeadModel, FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """distilbert""": ( DistilBertConfig, TFDistilBertForMaskedLM, DistilBertForMaskedLM, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """distilbert-base-distilled-squad""": ( DistilBertConfig, TFDistilBertForQuestionAnswering, DistilBertForQuestionAnswering, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """lxmert""": ( LxmertConfig, TFLxmertForPreTraining, LxmertForPreTraining, LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """lxmert-visual-feature-encoder""": ( LxmertConfig, TFLxmertVisualFeatureEncoder, LxmertVisualFeatureEncoder, LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """ctrl""": ( CTRLConfig, TFCTRLLMHeadModel, CTRLLMHeadModel, CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """albert""": ( AlbertConfig, TFAlbertForPreTraining, AlbertForPreTraining, ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """t5""": ( TaConfig, TFTaForConditionalGeneration, TaForConditionalGeneration, T5_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """electra""": ( ElectraConfig, TFElectraForPreTraining, ElectraForPreTraining, ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """wav2vec2""": ( WavaVecaConfig, TFWavaVecaModel, WavaVecaModel, WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, ), } def lowercase__(A , A , A , A , A=False , A=True ) ->Union[str, Any]: """simple docstring""" if model_type not in MODEL_CLASSES: raise ValueError(f'''Unrecognized model type, should be one of {list(MODEL_CLASSES.keys() )}.''' ) lowercase__, lowercase__, lowercase__, lowercase__ : List[Any]= MODEL_CLASSES[model_type] # Initialise TF model if config_file in aws_config_map: lowercase__ : List[str]= cached_file(A , A , force_download=not use_cached_models ) lowercase__ : List[Any]= config_class.from_json_file(A ) lowercase__ : Any= True lowercase__ : List[str]= True print(f'''Building TensorFlow model from configuration: {config}''' ) lowercase__ : Optional[int]= model_class(A ) # Load weights from tf checkpoint if pytorch_checkpoint_path in aws_config_map.keys(): lowercase__ : List[str]= cached_file( A , A , force_download=not use_cached_models ) # Load PyTorch checkpoint in tf2 model: lowercase__ : Union[str, Any]= load_pytorch_checkpoint_in_tfa_model(A , A ) if compare_with_pt_model: lowercase__ : Any= tf_model(tf_model.dummy_inputs , training=A ) # build the network lowercase__ : Optional[Any]= torch.load(A , map_location="cpu" ) lowercase__ : Union[str, Any]= pt_model_class.from_pretrained( pretrained_model_name_or_path=A , config=A , state_dict=A ) with torch.no_grad(): lowercase__ : str= pt_model(**pt_model.dummy_inputs ) lowercase__ : Tuple= pto[0].numpy() lowercase__ : List[Any]= tfo[0].numpy() lowercase__ : Any= np.amax(np.abs(np_pt - np_tf ) ) print(f'''Max absolute difference between models outputs {diff}''' ) assert diff <= 2e-2, f'''Error, model absolute difference is >2e-2: {diff}''' # Save pytorch-model print(f'''Save TensorFlow model to {tf_dump_path}''' ) tf_model.save_weights(A , save_format="h5" ) def lowercase__(A , A , A=None , A=None , A=False , A=False , A=False , A=False , ) ->List[Any]: """simple docstring""" if args_model_type is None: lowercase__ : Tuple= list(MODEL_CLASSES.keys() ) else: lowercase__ : Optional[int]= [args_model_type] for j, model_type in enumerate(A , start=1 ): print("=" * 100 ) print(f''' Converting model type {j}/{len(A )}: {model_type}''' ) print("=" * 100 ) if model_type not in MODEL_CLASSES: raise ValueError(f'''Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys() )}.''' ) lowercase__, lowercase__, lowercase__, lowercase__, lowercase__ : Optional[int]= MODEL_CLASSES[model_type] if model_shortcut_names_or_path is None: lowercase__ : int= list(aws_model_maps.keys() ) if config_shortcut_names_or_path is None: lowercase__ : Any= model_shortcut_names_or_path for i, (model_shortcut_name, config_shortcut_name) in enumerate( zip(A , A ) , start=1 ): print("-" * 100 ) if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name: if not only_convert_finetuned_models: print(f''' Skipping finetuned checkpoint {model_shortcut_name}''' ) continue lowercase__ : Any= model_shortcut_name elif only_convert_finetuned_models: print(f''' Skipping not finetuned checkpoint {model_shortcut_name}''' ) continue print( f''' Converting checkpoint {i}/{len(A )}: {model_shortcut_name} - model_type {model_type}''' ) print("-" * 100 ) if config_shortcut_name in aws_config_map: lowercase__ : List[str]= cached_file(A , A , force_download=not use_cached_models ) else: lowercase__ : Union[str, Any]= config_shortcut_name if model_shortcut_name in aws_model_maps: lowercase__ : str= cached_file(A , A , force_download=not use_cached_models ) else: lowercase__ : Any= model_shortcut_name if os.path.isfile(A ): lowercase__ : Dict= "converted_model" convert_pt_checkpoint_to_tf( model_type=A , pytorch_checkpoint_path=A , config_file=A , tf_dump_path=os.path.join(A , model_shortcut_name + "-tf_model.h5" ) , compare_with_pt_model=A , ) if remove_cached_files: os.remove(A ) os.remove(A ) if __name__ == "__main__": a : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--tf_dump_path""", default=None, type=str, required=True, help="""Path to the output Tensorflow dump file.""" ) parser.add_argument( """--model_type""", default=None, type=str, help=( F"""Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and """ """convert all the models from AWS.""" ), ) parser.add_argument( """--pytorch_checkpoint_path""", default=None, type=str, help=( """Path to the PyTorch checkpoint path or shortcut name to download from AWS. """ """If not given, will download and convert all the checkpoints from AWS.""" ), ) parser.add_argument( """--config_file""", default=None, type=str, help=( """The config json file corresponding to the pre-trained model. \n""" """This specifies the model architecture. If not given and """ """--pytorch_checkpoint_path is not given or is a shortcut name """ """use the configuration associated to the shortcut name on the AWS""" ), ) parser.add_argument( """--compare_with_pt_model""", action="""store_true""", help="""Compare Tensorflow and PyTorch model predictions.""" ) parser.add_argument( """--use_cached_models""", action="""store_true""", help="""Use cached models if possible instead of updating to latest checkpoint versions.""", ) parser.add_argument( """--remove_cached_files""", action="""store_true""", help="""Remove pytorch models after conversion (save memory when converting in batches).""", ) parser.add_argument("""--only_convert_finetuned_models""", action="""store_true""", help="""Only convert finetuned models.""") a : List[str] = parser.parse_args() # if args.pytorch_checkpoint_path is not None: # convert_pt_checkpoint_to_tf(args.model_type.lower(), # args.pytorch_checkpoint_path, # args.config_file if args.config_file is not None else args.pytorch_checkpoint_path, # args.tf_dump_path, # compare_with_pt_model=args.compare_with_pt_model, # use_cached_models=args.use_cached_models) # else: convert_all_pt_checkpoints_to_tf( args.model_type.lower() if args.model_type is not None else None, args.tf_dump_path, model_shortcut_names_or_path=[args.pytorch_checkpoint_path] if args.pytorch_checkpoint_path is not None else None, config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None, compare_with_pt_model=args.compare_with_pt_model, use_cached_models=args.use_cached_models, remove_cached_files=args.remove_cached_files, only_convert_finetuned_models=args.only_convert_finetuned_models, )
85
1
'''simple docstring''' import fire from transformers import AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer def __snake_case ( lowerCAmelCase : str , lowerCAmelCase : str , **lowerCAmelCase : Optional[int] ): __UpperCAmelCase = AutoConfig.from_pretrained(lowerCAmelCase , **lowerCAmelCase ) __UpperCAmelCase = AutoModelForSeqaSeqLM.from_config(lowerCAmelCase ) model.save_pretrained(lowerCAmelCase ) AutoTokenizer.from_pretrained(lowerCAmelCase ).save_pretrained(lowerCAmelCase ) return model if __name__ == "__main__": fire.Fire(save_randomly_initialized_version)
396
'''simple docstring''' import unittest from transformers import ( MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, Pipeline, ZeroShotClassificationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow from .test_pipelines_common import ANY # These 2 model types require different inputs than those of the usual text models. _UpperCamelCase : List[str] = {'LayoutLMv2Config', 'LayoutLMv3Config'} @is_pipeline_test class _lowercase( unittest.TestCase ): """simple docstring""" __lowerCamelCase = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING __lowerCamelCase = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if model_mapping is not None: __lowerCamelCase = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP} if tf_model_mapping is not None: __lowerCamelCase = { config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP } def snake_case ( self: Optional[Any] ,a: Optional[int] ,a: Tuple ,a: Tuple ): __UpperCAmelCase = ZeroShotClassificationPipeline( model=a ,tokenizer=a ,candidate_labels=['polics', 'health'] ) return classifier, ["Who are you voting for in 2020?", "My stomach hurts."] def snake_case ( self: int ,a: Union[str, Any] ,a: List[str] ): __UpperCAmelCase = classifier('Who are you voting for in 2020?' ,candidate_labels='politics' ) self.assertEqual(a ,{'sequence': ANY(a ), 'labels': [ANY(a )], 'scores': [ANY(a )]} ) # No kwarg __UpperCAmelCase = classifier('Who are you voting for in 2020?' ,['politics'] ) self.assertEqual(a ,{'sequence': ANY(a ), 'labels': [ANY(a )], 'scores': [ANY(a )]} ) __UpperCAmelCase = classifier('Who are you voting for in 2020?' ,candidate_labels=['politics'] ) self.assertEqual(a ,{'sequence': ANY(a ), 'labels': [ANY(a )], 'scores': [ANY(a )]} ) __UpperCAmelCase = classifier('Who are you voting for in 2020?' ,candidate_labels='politics, public health' ) self.assertEqual( a ,{'sequence': ANY(a ), 'labels': [ANY(a ), ANY(a )], 'scores': [ANY(a ), ANY(a )]} ) self.assertAlmostEqual(sum(nested_simplify(outputs['scores'] ) ) ,1.0 ) __UpperCAmelCase = classifier('Who are you voting for in 2020?' ,candidate_labels=['politics', 'public health'] ) self.assertEqual( a ,{'sequence': ANY(a ), 'labels': [ANY(a ), ANY(a )], 'scores': [ANY(a ), ANY(a )]} ) self.assertAlmostEqual(sum(nested_simplify(outputs['scores'] ) ) ,1.0 ) __UpperCAmelCase = classifier( 'Who are you voting for in 2020?' ,candidate_labels='politics' ,hypothesis_template='This text is about {}' ) self.assertEqual(a ,{'sequence': ANY(a ), 'labels': [ANY(a )], 'scores': [ANY(a )]} ) # https://github.com/huggingface/transformers/issues/13846 __UpperCAmelCase = classifier(['I am happy'] ,['positive', 'negative'] ) self.assertEqual( a ,[ {'sequence': ANY(a ), 'labels': [ANY(a ), ANY(a )], 'scores': [ANY(a ), ANY(a )]} for i in range(1 ) ] ,) __UpperCAmelCase = classifier(['I am happy', 'I am sad'] ,['positive', 'negative'] ) self.assertEqual( a ,[ {'sequence': ANY(a ), 'labels': [ANY(a ), ANY(a )], 'scores': [ANY(a ), ANY(a )]} for i in range(2 ) ] ,) with self.assertRaises(a ): classifier('' ,candidate_labels='politics' ) with self.assertRaises(a ): classifier(a ,candidate_labels='politics' ) with self.assertRaises(a ): classifier('Who are you voting for in 2020?' ,candidate_labels='' ) with self.assertRaises(a ): classifier('Who are you voting for in 2020?' ,candidate_labels=a ) with self.assertRaises(a ): classifier( 'Who are you voting for in 2020?' ,candidate_labels='politics' ,hypothesis_template='Not formatting template' ,) with self.assertRaises(a ): classifier( 'Who are you voting for in 2020?' ,candidate_labels='politics' ,hypothesis_template=a ,) self.run_entailment_id(a ) def snake_case ( self: int ,a: Pipeline ): __UpperCAmelCase = zero_shot_classifier.model.config __UpperCAmelCase = config.labelaid __UpperCAmelCase = zero_shot_classifier.entailment_id __UpperCAmelCase = {'LABEL_0': 0, 'LABEL_1': 1, 'LABEL_2': 2} self.assertEqual(zero_shot_classifier.entailment_id ,-1 ) __UpperCAmelCase = {'entailment': 0, 'neutral': 1, 'contradiction': 2} self.assertEqual(zero_shot_classifier.entailment_id ,0 ) __UpperCAmelCase = {'ENTAIL': 0, 'NON-ENTAIL': 1} self.assertEqual(zero_shot_classifier.entailment_id ,0 ) __UpperCAmelCase = {'ENTAIL': 2, 'NEUTRAL': 1, 'CONTR': 0} self.assertEqual(zero_shot_classifier.entailment_id ,2 ) __UpperCAmelCase = original_labelaid self.assertEqual(a ,zero_shot_classifier.entailment_id ) @require_torch def snake_case ( self: List[Any] ): __UpperCAmelCase = pipeline( 'zero-shot-classification' ,model='sshleifer/tiny-distilbert-base-cased-distilled-squad' ,framework='pt' ,) # There was a regression in 4.10 for this # Adding a test so we don't make the mistake again. # https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499 zero_shot_classifier( 'Who are you voting for in 2020?' * 100 ,candidate_labels=['politics', 'public health', 'science'] ) @require_torch def snake_case ( self: Tuple ): __UpperCAmelCase = pipeline( 'zero-shot-classification' ,model='sshleifer/tiny-distilbert-base-cased-distilled-squad' ,framework='pt' ,) __UpperCAmelCase = zero_shot_classifier( 'Who are you voting for in 2020?' ,candidate_labels=['politics', 'public health', 'science'] ) self.assertEqual( nested_simplify(a ) ,{ 'sequence': 'Who are you voting for in 2020?', 'labels': ['science', 'public health', 'politics'], 'scores': [0.333, 0.333, 0.333], } ,) @require_tf def snake_case ( self: int ): __UpperCAmelCase = pipeline( 'zero-shot-classification' ,model='sshleifer/tiny-distilbert-base-cased-distilled-squad' ,framework='tf' ,) __UpperCAmelCase = zero_shot_classifier( 'Who are you voting for in 2020?' ,candidate_labels=['politics', 'public health', 'science'] ) self.assertEqual( nested_simplify(a ) ,{ 'sequence': 'Who are you voting for in 2020?', 'labels': ['science', 'public health', 'politics'], 'scores': [0.333, 0.333, 0.333], } ,) @slow @require_torch def snake_case ( self: int ): __UpperCAmelCase = pipeline('zero-shot-classification' ,model='roberta-large-mnli' ,framework='pt' ) __UpperCAmelCase = zero_shot_classifier( 'Who are you voting for in 2020?' ,candidate_labels=['politics', 'public health', 'science'] ) self.assertEqual( nested_simplify(a ) ,{ 'sequence': 'Who are you voting for in 2020?', 'labels': ['politics', 'public health', 'science'], 'scores': [0.976, 0.015, 0.009], } ,) __UpperCAmelCase = zero_shot_classifier( 'The dominant sequence transduction models are based on complex recurrent or convolutional neural networks' ' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder' ' through an attention mechanism. We propose a new simple network architecture, the Transformer, based' ' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two' ' machine translation tasks show these models to be superior in quality while being more parallelizable' ' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014' ' English-to-German translation task, improving over the existing best results, including ensembles by' ' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new' ' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small' ' fraction of the training costs of the best models from the literature. We show that the Transformer' ' generalizes well to other tasks by applying it successfully to English constituency parsing both with' ' large and limited training data.' ,candidate_labels=['machine learning', 'statistics', 'translation', 'vision'] ,multi_label=a ,) self.assertEqual( nested_simplify(a ) ,{ 'sequence': ( 'The dominant sequence transduction models are based on complex recurrent or convolutional neural' ' networks in an encoder-decoder configuration. The best performing models also connect the' ' encoder and decoder through an attention mechanism. We propose a new simple network' ' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence' ' and convolutions entirely. Experiments on two machine translation tasks show these models to be' ' superior in quality while being more parallelizable and requiring significantly less time to' ' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,' ' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014' ' English-to-French translation task, our model establishes a new single-model state-of-the-art' ' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training' ' costs of the best models from the literature. We show that the Transformer generalizes well to' ' other tasks by applying it successfully to English constituency parsing both with large and' ' limited training data.' ), 'labels': ['translation', 'machine learning', 'vision', 'statistics'], 'scores': [0.817, 0.713, 0.018, 0.018], } ,) @slow @require_tf def snake_case ( self: str ): __UpperCAmelCase = pipeline('zero-shot-classification' ,model='roberta-large-mnli' ,framework='tf' ) __UpperCAmelCase = zero_shot_classifier( 'Who are you voting for in 2020?' ,candidate_labels=['politics', 'public health', 'science'] ) self.assertEqual( nested_simplify(a ) ,{ 'sequence': 'Who are you voting for in 2020?', 'labels': ['politics', 'public health', 'science'], 'scores': [0.976, 0.015, 0.009], } ,) __UpperCAmelCase = zero_shot_classifier( 'The dominant sequence transduction models are based on complex recurrent or convolutional neural networks' ' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder' ' through an attention mechanism. We propose a new simple network architecture, the Transformer, based' ' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two' ' machine translation tasks show these models to be superior in quality while being more parallelizable' ' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014' ' English-to-German translation task, improving over the existing best results, including ensembles by' ' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new' ' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small' ' fraction of the training costs of the best models from the literature. We show that the Transformer' ' generalizes well to other tasks by applying it successfully to English constituency parsing both with' ' large and limited training data.' ,candidate_labels=['machine learning', 'statistics', 'translation', 'vision'] ,multi_label=a ,) self.assertEqual( nested_simplify(a ) ,{ 'sequence': ( 'The dominant sequence transduction models are based on complex recurrent or convolutional neural' ' networks in an encoder-decoder configuration. The best performing models also connect the' ' encoder and decoder through an attention mechanism. We propose a new simple network' ' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence' ' and convolutions entirely. Experiments on two machine translation tasks show these models to be' ' superior in quality while being more parallelizable and requiring significantly less time to' ' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,' ' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014' ' English-to-French translation task, our model establishes a new single-model state-of-the-art' ' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training' ' costs of the best models from the literature. We show that the Transformer generalizes well to' ' other tasks by applying it successfully to English constituency parsing both with large and' ' limited training data.' ), 'labels': ['translation', 'machine learning', 'vision', 'statistics'], 'scores': [0.817, 0.713, 0.018, 0.018], } ,)
396
1
'''simple docstring''' from typing import TYPE_CHECKING from ..utils import _LazyModule UpperCAmelCase : Optional[Any] = { 'config': [ 'EXTERNAL_DATA_FORMAT_SIZE_LIMIT', 'OnnxConfig', 'OnnxConfigWithPast', 'OnnxSeq2SeqConfigWithPast', 'PatchingSpec', ], 'convert': ['export', 'validate_model_outputs'], 'features': ['FeaturesManager'], 'utils': ['ParameterFormat', 'compute_serialized_parameters_size'], } if TYPE_CHECKING: from .config import ( EXTERNAL_DATA_FORMAT_SIZE_LIMIT, OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast, PatchingSpec, ) from .convert import export, validate_model_outputs from .features import FeaturesManager from .utils import ParameterFormat, compute_serialized_parameters_size else: import sys UpperCAmelCase : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
47
'''simple docstring''' import math from numpy import inf from scipy.integrate import quad def _a ( lowerCAmelCase_ ): """simple docstring""" if num <= 0: raise ValueError('''math domain error''' ) return quad(lowerCAmelCase_ , 0 , lowerCAmelCase_ , args=(lowerCAmelCase_) )[0] def _a ( lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" return math.pow(lowerCAmelCase_ , z - 1 ) * math.exp(-x ) if __name__ == "__main__": from doctest import testmod testmod()
47
1
'''simple docstring''' def SCREAMING_SNAKE_CASE ( lowerCAmelCase__ : int , lowerCAmelCase__ : int) -> int: '''simple docstring''' return int((input_a, input_a).count(0) != 0) def SCREAMING_SNAKE_CASE ( ) -> None: '''simple docstring''' assert nand_gate(0 , 0) == 1 assert nand_gate(0 , 1) == 1 assert nand_gate(1 , 0) == 1 assert nand_gate(1 , 1) == 0 if __name__ == "__main__": print(nand_gate(0, 0)) print(nand_gate(0, 1)) print(nand_gate(1, 0)) print(nand_gate(1, 1))
125
'''simple docstring''' import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( HubertConfig, HubertForCTC, HubertModel, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() A = logging.get_logger(__name__) A = { '''post_extract_proj''': '''feature_projection.projection''', '''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''', '''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''', '''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''', '''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''', '''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''', '''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''', '''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''', '''fc2''': '''encoder.layers.*.feed_forward.output_dense''', '''final_layer_norm''': '''encoder.layers.*.final_layer_norm''', '''encoder.layer_norm''': '''encoder.layer_norm''', '''w2v_model.layer_norm''': '''feature_projection.layer_norm''', '''w2v_encoder.proj''': '''lm_head''', '''mask_emb''': '''masked_spec_embed''', } def SCREAMING_SNAKE_CASE ( lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Any) -> List[str]: '''simple docstring''' for attribute in key.split('.'): _lowercase : Dict = getattr(lowerCAmelCase__ , lowerCAmelCase__) if weight_type is not None: _lowercase : int = getattr(lowerCAmelCase__ , lowerCAmelCase__).shape else: _lowercase : List[Any] = hf_pointer.shape assert hf_shape == value.shape, ( F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be''' F''' {value.shape} for {full_name}''' ) if weight_type == "weight": _lowercase : Optional[Any] = value elif weight_type == "weight_g": _lowercase : Tuple = value elif weight_type == "weight_v": _lowercase : List[str] = value elif weight_type == "bias": _lowercase : Tuple = value else: _lowercase : Dict = value logger.info(F'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''') def SCREAMING_SNAKE_CASE ( lowerCAmelCase__ : List[str] , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Tuple) -> str: '''simple docstring''' _lowercase : Tuple = [] _lowercase : Optional[int] = fairseq_model.state_dict() _lowercase : List[Any] = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor for name, value in fairseq_dict.items(): _lowercase : int = False if "conv_layers" in name: load_conv_layer( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , hf_model.config.feat_extract_norm == 'group' , ) _lowercase : Tuple = True else: for key, mapped_key in MAPPING.items(): _lowercase : str = 'hubert.' + mapped_key if (is_finetuned and mapped_key != 'lm_head') else mapped_key if key in name or (key.split('w2v_model.')[-1] == name.split('.')[0] and not is_finetuned): _lowercase : Dict = True if "*" in mapped_key: _lowercase : int = name.split(lowerCAmelCase__)[0].split('.')[-2] _lowercase : Optional[Any] = mapped_key.replace('*' , lowerCAmelCase__) if "weight_g" in name: _lowercase : int = 'weight_g' elif "weight_v" in name: _lowercase : Optional[int] = 'weight_v' elif "weight" in name: _lowercase : Tuple = 'weight' elif "bias" in name: _lowercase : int = 'bias' else: _lowercase : Optional[Any] = None set_recursively(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__) continue if not is_used: unused_weights.append(lowerCAmelCase__) logger.warning(F'''Unused weights: {unused_weights}''') def SCREAMING_SNAKE_CASE ( lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Tuple) -> int: '''simple docstring''' _lowercase : str = full_name.split('conv_layers.')[-1] _lowercase : List[str] = name.split('.') _lowercase : Optional[Any] = int(items[0]) _lowercase : Tuple = int(items[1]) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) _lowercase : Any = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''') elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) _lowercase : Tuple = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''') elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was''' " found." ) _lowercase : Union[str, Any] = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''') elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.''' ) _lowercase : int = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''') else: unused_weights.append(lowerCAmelCase__) @torch.no_grad() def SCREAMING_SNAKE_CASE ( lowerCAmelCase__ : Any , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Optional[int]=None , lowerCAmelCase__ : Union[str, Any]=None , lowerCAmelCase__ : Dict=True) -> Tuple: '''simple docstring''' if config_path is not None: _lowercase : List[str] = HubertConfig.from_pretrained(lowerCAmelCase__) else: _lowercase : List[str] = HubertConfig() if is_finetuned: if dict_path: _lowercase : Any = Dictionary.load(lowerCAmelCase__) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq _lowercase : Tuple = target_dict.pad_index _lowercase : List[str] = target_dict.bos_index _lowercase : Tuple = target_dict.eos_index _lowercase : Dict = len(target_dict.symbols) _lowercase : Optional[int] = os.path.join(lowerCAmelCase__ , 'vocab.json') if not os.path.isdir(lowerCAmelCase__): logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(lowerCAmelCase__)) return os.makedirs(lowerCAmelCase__ , exist_ok=lowerCAmelCase__) with open(lowerCAmelCase__ , 'w' , encoding='utf-8') as vocab_handle: json.dump(target_dict.indices , lowerCAmelCase__) _lowercase : List[Any] = WavaVecaCTCTokenizer( lowerCAmelCase__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=lowerCAmelCase__ , ) _lowercase : Optional[Any] = True if config.feat_extract_norm == 'layer' else False _lowercase : Union[str, Any] = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=lowerCAmelCase__ , return_attention_mask=lowerCAmelCase__ , ) _lowercase : int = WavaVecaProcessor(feature_extractor=lowerCAmelCase__ , tokenizer=lowerCAmelCase__) processor.save_pretrained(lowerCAmelCase__) _lowercase : Optional[Any] = HubertForCTC(lowerCAmelCase__) else: _lowercase : Union[str, Any] = HubertModel(lowerCAmelCase__) if is_finetuned: _lowercase , _lowercase , _lowercase : str = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/')[:-1])}) else: _lowercase , _lowercase , _lowercase : List[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path]) _lowercase : Union[str, Any] = model[0].eval() recursively_load_weights(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__) hf_wavavec.save_pretrained(lowerCAmelCase__) if __name__ == "__main__": A = argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument( '''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not''' ) A = parser.parse_args() convert_hubert_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
125
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) _lowerCamelCase = { 'configuration_longformer': [ 'LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LongformerConfig', 'LongformerOnnxConfig', ], 'tokenization_longformer': ['LongformerTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase = ['LongformerTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase = [ 'LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'LongformerForMaskedLM', 'LongformerForMultipleChoice', 'LongformerForQuestionAnswering', 'LongformerForSequenceClassification', 'LongformerForTokenClassification', 'LongformerModel', 'LongformerPreTrainedModel', 'LongformerSelfAttention', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase = [ 'TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFLongformerForMaskedLM', 'TFLongformerForMultipleChoice', 'TFLongformerForQuestionAnswering', 'TFLongformerForSequenceClassification', 'TFLongformerForTokenClassification', 'TFLongformerModel', 'TFLongformerPreTrainedModel', 'TFLongformerSelfAttention', ] if TYPE_CHECKING: from .configuration_longformer import ( LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, LongformerConfig, LongformerOnnxConfig, ) from .tokenization_longformer import LongformerTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_longformer_fast import LongformerTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_longformer import ( LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, LongformerForMaskedLM, LongformerForMultipleChoice, LongformerForQuestionAnswering, LongformerForSequenceClassification, LongformerForTokenClassification, LongformerModel, LongformerPreTrainedModel, LongformerSelfAttention, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_longformer import ( TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TFLongformerForMaskedLM, TFLongformerForMultipleChoice, TFLongformerForQuestionAnswering, TFLongformerForSequenceClassification, TFLongformerForTokenClassification, TFLongformerModel, TFLongformerPreTrainedModel, TFLongformerSelfAttention, ) else: import sys _lowerCamelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
112
"""simple docstring""" import unittest from transformers import AutoTokenizer, FalconConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( FalconForCausalLM, FalconForQuestionAnswering, FalconForSequenceClassification, FalconForTokenClassification, FalconModel, ) class lowerCamelCase_ : """simple docstring""" def __init__( self , UpperCAmelCase__ , UpperCAmelCase__=3 , UpperCAmelCase__=7 , UpperCAmelCase__=True , UpperCAmelCase__=True , UpperCAmelCase__=False , UpperCAmelCase__=True , UpperCAmelCase__=99 , UpperCAmelCase__=32 , UpperCAmelCase__=5 , UpperCAmelCase__=4 , UpperCAmelCase__=37 , UpperCAmelCase__="gelu" , UpperCAmelCase__=0.1 , UpperCAmelCase__=0.1 , UpperCAmelCase__=512 , UpperCAmelCase__=16 , UpperCAmelCase__=2 , UpperCAmelCase__=0.02 , UpperCAmelCase__=3 , UpperCAmelCase__=4 , UpperCAmelCase__=None , ): SCREAMING_SNAKE_CASE__ = parent SCREAMING_SNAKE_CASE__ = batch_size SCREAMING_SNAKE_CASE__ = seq_length SCREAMING_SNAKE_CASE__ = is_training SCREAMING_SNAKE_CASE__ = use_input_mask SCREAMING_SNAKE_CASE__ = use_token_type_ids SCREAMING_SNAKE_CASE__ = use_labels SCREAMING_SNAKE_CASE__ = vocab_size SCREAMING_SNAKE_CASE__ = hidden_size SCREAMING_SNAKE_CASE__ = num_hidden_layers SCREAMING_SNAKE_CASE__ = num_attention_heads SCREAMING_SNAKE_CASE__ = intermediate_size SCREAMING_SNAKE_CASE__ = hidden_act SCREAMING_SNAKE_CASE__ = hidden_dropout_prob SCREAMING_SNAKE_CASE__ = attention_probs_dropout_prob SCREAMING_SNAKE_CASE__ = max_position_embeddings SCREAMING_SNAKE_CASE__ = type_vocab_size SCREAMING_SNAKE_CASE__ = type_sequence_label_size SCREAMING_SNAKE_CASE__ = initializer_range SCREAMING_SNAKE_CASE__ = num_labels SCREAMING_SNAKE_CASE__ = num_choices SCREAMING_SNAKE_CASE__ = scope def lowerCAmelCase__ ( self ): SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) SCREAMING_SNAKE_CASE__ = None if self.use_input_mask: SCREAMING_SNAKE_CASE__ = random_attention_mask([self.batch_size, self.seq_length] ) SCREAMING_SNAKE_CASE__ = None SCREAMING_SNAKE_CASE__ = None SCREAMING_SNAKE_CASE__ = None SCREAMING_SNAKE_CASE__ = None if self.use_labels: SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size] , self.num_choices ) SCREAMING_SNAKE_CASE__ = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCAmelCase__ ( self ): return FalconConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , pad_token_id=1 , new_decoder_architecture=UpperCAmelCase__ , ) def lowerCAmelCase__ ( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ): SCREAMING_SNAKE_CASE__ = FalconModel(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() SCREAMING_SNAKE_CASE__ = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ ) SCREAMING_SNAKE_CASE__ = model(UpperCAmelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCAmelCase__ ( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , ): SCREAMING_SNAKE_CASE__ = True SCREAMING_SNAKE_CASE__ = FalconModel(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() SCREAMING_SNAKE_CASE__ = model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , ) SCREAMING_SNAKE_CASE__ = model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , ) SCREAMING_SNAKE_CASE__ = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCAmelCase__ ( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , ): SCREAMING_SNAKE_CASE__ = FalconForCausalLM(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() SCREAMING_SNAKE_CASE__ = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , labels=UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCAmelCase__ ( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , ): SCREAMING_SNAKE_CASE__ = True SCREAMING_SNAKE_CASE__ = True SCREAMING_SNAKE_CASE__ = FalconForCausalLM(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() # first forward pass SCREAMING_SNAKE_CASE__ = model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , use_cache=UpperCAmelCase__ , ) SCREAMING_SNAKE_CASE__ = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids SCREAMING_SNAKE_CASE__ = ids_tensor((self.batch_size, 3) , config.vocab_size ) SCREAMING_SNAKE_CASE__ = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and SCREAMING_SNAKE_CASE__ = torch.cat([input_ids, next_tokens] , dim=-1 ) SCREAMING_SNAKE_CASE__ = torch.cat([input_mask, next_mask] , dim=-1 ) SCREAMING_SNAKE_CASE__ = model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , output_hidden_states=UpperCAmelCase__ , )["hidden_states"][0] SCREAMING_SNAKE_CASE__ = model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , past_key_values=UpperCAmelCase__ , output_hidden_states=UpperCAmelCase__ , )["hidden_states"][0] # select random slice SCREAMING_SNAKE_CASE__ = ids_tensor((1,) , output_from_past.shape[-1] ).item() SCREAMING_SNAKE_CASE__ = output_from_no_past[:, -3:, random_slice_idx].detach() SCREAMING_SNAKE_CASE__ = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1e-3 ) ) def lowerCAmelCase__ ( self ): SCREAMING_SNAKE_CASE__ = self.prepare_config_and_inputs() ( ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ) = config_and_inputs SCREAMING_SNAKE_CASE__ = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class lowerCamelCase_ ( lowercase , lowercase , lowercase , unittest.TestCase ): """simple docstring""" _lowerCAmelCase : Optional[Any] = ( ( FalconModel, FalconForCausalLM, FalconForSequenceClassification, FalconForTokenClassification, FalconForQuestionAnswering, ) if is_torch_available() else () ) _lowerCAmelCase : str = (FalconForCausalLM,) if is_torch_available() else () _lowerCAmelCase : Optional[int] = ( { "feature-extraction": FalconModel, "text-classification": FalconForSequenceClassification, "text-generation": FalconForCausalLM, "question-answering": FalconForQuestionAnswering, "token-classification": FalconForTokenClassification, "zero-shot": FalconForSequenceClassification, } if is_torch_available() else {} ) _lowerCAmelCase : str = False _lowerCAmelCase : Dict = False def lowerCAmelCase__ ( self ): SCREAMING_SNAKE_CASE__ = FalconModelTester(self ) SCREAMING_SNAKE_CASE__ = ConfigTester(self , config_class=UpperCAmelCase__ , hidden_size=37 ) def lowerCAmelCase__ ( self ): self.config_tester.run_common_tests() def lowerCAmelCase__ ( self ): SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase__ ) def lowerCAmelCase__ ( self ): SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs() for alibi in [True, False]: SCREAMING_SNAKE_CASE__ = alibi self.model_tester.create_and_check_model(UpperCAmelCase__ , *UpperCAmelCase__ ) def lowerCAmelCase__ ( self ): SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE__ = 3 SCREAMING_SNAKE_CASE__ = input_dict["input_ids"] SCREAMING_SNAKE_CASE__ = input_ids.ne(1 ).to(UpperCAmelCase__ ) SCREAMING_SNAKE_CASE__ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) SCREAMING_SNAKE_CASE__ = FalconForSequenceClassification(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() SCREAMING_SNAKE_CASE__ = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , labels=UpperCAmelCase__ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def lowerCAmelCase__ ( self ): SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE__ = 3 SCREAMING_SNAKE_CASE__ = "single_label_classification" SCREAMING_SNAKE_CASE__ = input_dict["input_ids"] SCREAMING_SNAKE_CASE__ = input_ids.ne(1 ).to(UpperCAmelCase__ ) SCREAMING_SNAKE_CASE__ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) SCREAMING_SNAKE_CASE__ = FalconForSequenceClassification(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() SCREAMING_SNAKE_CASE__ = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , labels=UpperCAmelCase__ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def lowerCAmelCase__ ( self ): SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE__ = input_dict["input_ids"] SCREAMING_SNAKE_CASE__ = FalconForCausalLM(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() SCREAMING_SNAKE_CASE__ = model(UpperCAmelCase__ , use_cache=UpperCAmelCase__ ) SCREAMING_SNAKE_CASE__ = input_ids.shape[0] SCREAMING_SNAKE_CASE__ = model._convert_to_rw_cache(result.past_key_values ) SCREAMING_SNAKE_CASE__ = model._convert_cache_to_standard_format(UpperCAmelCase__ , UpperCAmelCase__ ) for layer in range(len(UpperCAmelCase__ ) ): for tensor_idx in range(2 ): self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3 ) self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4 ) self.assertTrue( torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx] ) ) def lowerCAmelCase__ ( self ): SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE__ = 3 SCREAMING_SNAKE_CASE__ = "multi_label_classification" SCREAMING_SNAKE_CASE__ = input_dict["input_ids"] SCREAMING_SNAKE_CASE__ = input_ids.ne(1 ).to(UpperCAmelCase__ ) SCREAMING_SNAKE_CASE__ = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float ) SCREAMING_SNAKE_CASE__ = FalconForSequenceClassification(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() SCREAMING_SNAKE_CASE__ = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , labels=UpperCAmelCase__ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def lowerCAmelCase__ ( self ): # Falcon can have different numbers of KV-heads than the number of query heads, so we need # to override this test to use the right head counts. for model_class in self.all_generative_model_classes: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common() # If it doesn't support cache, pass the test if not hasattr(UpperCAmelCase__ , "use_cache" ): return SCREAMING_SNAKE_CASE__ = model_class(UpperCAmelCase__ ).to(UpperCAmelCase__ ) if "use_cache" not in inputs: SCREAMING_SNAKE_CASE__ = True SCREAMING_SNAKE_CASE__ = model(**UpperCAmelCase__ ) # If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format) if "past_key_values" not in outputs: return SCREAMING_SNAKE_CASE__ = ( getattr(UpperCAmelCase__ , "decoder_layers" , UpperCAmelCase__ ) or getattr(UpperCAmelCase__ , "num_decoder_layers" , UpperCAmelCase__ ) or config.num_hidden_layers ) SCREAMING_SNAKE_CASE__ = getattr(UpperCAmelCase__ , "num_kv_heads" , config.num_attention_heads ) SCREAMING_SNAKE_CASE__ = getattr(UpperCAmelCase__ , "d_model" , config.hidden_size ) SCREAMING_SNAKE_CASE__ = embed_dim // num_attention_heads SCREAMING_SNAKE_CASE__ = outputs["past_key_values"] self.assertEqual(len(UpperCAmelCase__ ) , UpperCAmelCase__ ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = inputs["input_ids"].shape for i in range(UpperCAmelCase__ ): if config.new_decoder_architecture: SCREAMING_SNAKE_CASE__ = config.num_attention_heads elif config.multi_query: SCREAMING_SNAKE_CASE__ = 1 self.assertEqual(len(past_kv[0] ) , 2 ) # K V for the decoder = 2 self.assertEqual( past_kv[i][0].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) ) self.assertEqual( past_kv[i][1].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) ) @require_torch class lowerCamelCase_ ( unittest.TestCase ): """simple docstring""" @slow def lowerCAmelCase__ ( self ): SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained("Rocketknight1/falcon-rw-1b" ) SCREAMING_SNAKE_CASE__ = FalconForCausalLM.from_pretrained("Rocketknight1/falcon-rw-1b" ) model.eval() model.to(UpperCAmelCase__ ) SCREAMING_SNAKE_CASE__ = tokenizer("My favorite food is" , return_tensors="pt" ).to(UpperCAmelCase__ ) SCREAMING_SNAKE_CASE__ = ( "My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday." ) SCREAMING_SNAKE_CASE__ = model.generate(**UpperCAmelCase__ , do_sample=UpperCAmelCase__ , max_new_tokens=19 ) SCREAMING_SNAKE_CASE__ = tokenizer.batch_decode(UpperCAmelCase__ )[0] self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ ) @slow def lowerCAmelCase__ ( self ): # The big models are way too big for the CI, so we use tiny random models that resemble their # architectures but with much smaller and fewer layers for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]: SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(UpperCAmelCase__ ) SCREAMING_SNAKE_CASE__ = FalconForCausalLM.from_pretrained(UpperCAmelCase__ ) model.eval() model.to(UpperCAmelCase__ ) SCREAMING_SNAKE_CASE__ = tokenizer("My favorite food is" , return_tensors="pt" ).to(UpperCAmelCase__ ) # We just test that these run without errors - the models are randomly initialized # and so the actual text outputs will be garbage model.generate(**UpperCAmelCase__ , do_sample=UpperCAmelCase__ , max_new_tokens=4 ) model.generate(**UpperCAmelCase__ , do_sample=UpperCAmelCase__ , max_new_tokens=4 ) model.generate(**UpperCAmelCase__ , num_beams=2 , max_new_tokens=4 ) @slow def lowerCAmelCase__ ( self ): # The big models are way too big for the CI, so we use tiny random models that resemble their # architectures but with much smaller and fewer layers with torch.no_grad(): for repo in [ "Rocketknight1/falcon-rw-1b", "Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b", ]: SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(UpperCAmelCase__ ) SCREAMING_SNAKE_CASE__ = FalconForCausalLM.from_pretrained(UpperCAmelCase__ ) model.eval() model.to(device=UpperCAmelCase__ ) SCREAMING_SNAKE_CASE__ = tokenizer("My favorite food is" , return_tensors="pt" ).to(UpperCAmelCase__ ) # Test results are the same with and without cache SCREAMING_SNAKE_CASE__ = model.generate(**UpperCAmelCase__ , do_sample=UpperCAmelCase__ , max_new_tokens=20 , use_cache=UpperCAmelCase__ ) SCREAMING_SNAKE_CASE__ = model.generate(**UpperCAmelCase__ , do_sample=UpperCAmelCase__ , max_new_tokens=20 , use_cache=UpperCAmelCase__ ) self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0 )
112
1
"""simple docstring""" import unittest from transformers import AutoConfig, AutoTokenizer, BertConfig, TensorType, is_flax_available from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow if is_flax_available(): import jax from transformers.models.auto.modeling_flax_auto import FlaxAutoModel from transformers.models.bert.modeling_flax_bert import FlaxBertModel from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel @require_flax class lowerCAmelCase__ ( unittest.TestCase ): @slow def __a ( self : Union[str, Any] ): '''simple docstring''' for model_name in ["bert-base-cased", "bert-large-uncased"]: with self.subTest(snake_case__ ): UpperCAmelCase__ : Optional[int] = AutoConfig.from_pretrained(snake_case__ ) self.assertIsNotNone(snake_case__ ) self.assertIsInstance(snake_case__ , snake_case__ ) UpperCAmelCase__ : str = FlaxAutoModel.from_pretrained(snake_case__ ) self.assertIsNotNone(snake_case__ ) self.assertIsInstance(snake_case__ , snake_case__ ) @slow def __a ( self : str ): '''simple docstring''' for model_name in ["roberta-base", "roberta-large"]: with self.subTest(snake_case__ ): UpperCAmelCase__ : Tuple = AutoConfig.from_pretrained(snake_case__ ) self.assertIsNotNone(snake_case__ ) self.assertIsInstance(snake_case__ , snake_case__ ) UpperCAmelCase__ : Any = FlaxAutoModel.from_pretrained(snake_case__ ) self.assertIsNotNone(snake_case__ ) self.assertIsInstance(snake_case__ , snake_case__ ) @slow def __a ( self : Optional[Any] ): '''simple docstring''' for model_name in ["bert-base-cased", "bert-large-uncased"]: UpperCAmelCase__ : Tuple = AutoTokenizer.from_pretrained(snake_case__ ) UpperCAmelCase__ : int = FlaxBertModel.from_pretrained(snake_case__ ) UpperCAmelCase__ : int = tokenizer("Do you support jax jitted function?" , return_tensors=TensorType.JAX ) @jax.jit def eval(**snake_case__ : List[Any] ): return model(**snake_case__ ) eval(**snake_case__ ).block_until_ready() @slow def __a ( self : Tuple ): '''simple docstring''' for model_name in ["roberta-base", "roberta-large"]: UpperCAmelCase__ : Dict = AutoTokenizer.from_pretrained(snake_case__ ) UpperCAmelCase__ : Dict = FlaxRobertaModel.from_pretrained(snake_case__ ) UpperCAmelCase__ : Any = tokenizer("Do you support jax jitted function?" , return_tensors=TensorType.JAX ) @jax.jit def eval(**snake_case__ : int ): return model(**snake_case__ ) eval(**snake_case__ ).block_until_ready() def __a ( self : List[str] ): '''simple docstring''' with self.assertRaisesRegex( snake_case__ , "bert-base is not a local folder and is not a valid model identifier" ): UpperCAmelCase__ : List[str] = FlaxAutoModel.from_pretrained("bert-base" ) def __a ( self : int ): '''simple docstring''' with self.assertRaisesRegex( snake_case__ , R"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ): UpperCAmelCase__ : Optional[int] = FlaxAutoModel.from_pretrained(snake_case__ , revision="aaaaaa" ) def __a ( self : str ): '''simple docstring''' with self.assertRaisesRegex( snake_case__ , "hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack" , ): UpperCAmelCase__ : Tuple = FlaxAutoModel.from_pretrained("hf-internal-testing/config-no-model" ) def __a ( self : str ): '''simple docstring''' with self.assertRaisesRegex(snake_case__ , "Use `from_pt=True` to load this model" ): UpperCAmelCase__ : Tuple = FlaxAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only" )
438
"""simple docstring""" import json from typing import Dict, List, Optional, Tuple, Union from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import PaddingStrategy, logging from .tokenization_led import LEDTokenizer _lowerCAmelCase : str = logging.get_logger(__name__) _lowerCAmelCase : int = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""} _lowerCAmelCase : List[Any] = { """vocab_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""", }, """merges_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""", }, """tokenizer_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""", }, } _lowerCAmelCase : List[str] = { """allenai/led-base-16384""": 16_384, } class lowerCAmelCase__ ( __magic_name__ ): SCREAMING_SNAKE_CASE_ =VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE_ =PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE_ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE_ =LEDTokenizer SCREAMING_SNAKE_CASE_ =['''input_ids''', '''attention_mask'''] def __init__( self : Optional[Any] , snake_case__ : str=None , snake_case__ : List[Any]=None , snake_case__ : Dict=None , snake_case__ : List[str]="replace" , snake_case__ : Optional[int]="<s>" , snake_case__ : List[str]="</s>" , snake_case__ : Union[str, Any]="</s>" , snake_case__ : Dict="<s>" , snake_case__ : Tuple="<unk>" , snake_case__ : Any="<pad>" , snake_case__ : Dict="<mask>" , snake_case__ : int=False , snake_case__ : Optional[int]=True , **snake_case__ : List[Any] , ): '''simple docstring''' super().__init__( snake_case__ , snake_case__ , tokenizer_file=snake_case__ , errors=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , sep_token=snake_case__ , cls_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , mask_token=snake_case__ , add_prefix_space=snake_case__ , trim_offsets=snake_case__ , **snake_case__ , ) UpperCAmelCase__ : str = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("add_prefix_space" , snake_case__ ) != add_prefix_space: UpperCAmelCase__ : Dict = getattr(snake_case__ , pre_tok_state.pop("type" ) ) UpperCAmelCase__ : str = add_prefix_space UpperCAmelCase__ : Any = pre_tok_class(**snake_case__ ) UpperCAmelCase__ : Dict = add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` UpperCAmelCase__ : List[str] = "post_processor" UpperCAmelCase__ : List[Any] = getattr(self.backend_tokenizer , snake_case__ , snake_case__ ) if tokenizer_component_instance: UpperCAmelCase__ : int = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: UpperCAmelCase__ : Optional[Any] = tuple(state["sep"] ) if "cls" in state: UpperCAmelCase__ : Any = tuple(state["cls"] ) UpperCAmelCase__ : Any = False if state.get("add_prefix_space" , snake_case__ ) != add_prefix_space: UpperCAmelCase__ : Union[str, Any] = add_prefix_space UpperCAmelCase__ : List[Any] = True if state.get("trim_offsets" , snake_case__ ) != trim_offsets: UpperCAmelCase__ : Optional[int] = trim_offsets UpperCAmelCase__ : List[Any] = True if changes_to_apply: UpperCAmelCase__ : List[str] = getattr(snake_case__ , state.pop("type" ) ) UpperCAmelCase__ : Optional[int] = component_class(**snake_case__ ) setattr(self.backend_tokenizer , snake_case__ , snake_case__ ) @property # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED def __a ( self : Any ): '''simple docstring''' if self._mask_token is None: if self.verbose: logger.error("Using mask_token, but it is not set yet." ) return None return str(self._mask_token ) @mask_token.setter def __a ( self : Any , snake_case__ : Dict ): '''simple docstring''' UpperCAmelCase__ : str = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else value UpperCAmelCase__ : Dict = value def __a ( self : str , *snake_case__ : Any , **snake_case__ : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Any = kwargs.get("is_split_into_words" , snake_case__ ) if is_split_into_words and not self.add_prefix_space: raise ValueError( f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True ' "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*snake_case__ , **snake_case__ ) def __a ( self : List[str] , *snake_case__ : Union[str, Any] , **snake_case__ : List[Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = kwargs.get("is_split_into_words" , snake_case__ ) if is_split_into_words and not self.add_prefix_space: raise ValueError( f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True ' "to use it with pretokenized inputs." ) return super()._encode_plus(*snake_case__ , **snake_case__ ) def __a ( self : Tuple , snake_case__ : str , snake_case__ : Optional[str] = None ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = self._tokenizer.model.save(snake_case__ , name=snake_case__ ) return tuple(snake_case__ ) def __a ( self : str , snake_case__ : List[Any] , snake_case__ : str=None ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def __a ( self : str , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = [self.sep_token_id] UpperCAmelCase__ : Optional[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def __a ( self : Any , snake_case__ : Union[Dict[str, EncodedInput], BatchEncoding] , snake_case__ : Optional[int] = None , snake_case__ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , snake_case__ : Optional[int] = None , snake_case__ : Optional[bool] = None , ): '''simple docstring''' UpperCAmelCase__ : str = super()._pad( encoded_inputs=snake_case__ , max_length=snake_case__ , padding_strategy=snake_case__ , pad_to_multiple_of=snake_case__ , return_attention_mask=snake_case__ , ) # Load from model defaults if return_attention_mask is None: UpperCAmelCase__ : Optional[int] = "attention_mask" in self.model_input_names if return_attention_mask and "global_attention_mask" in encoded_inputs: UpperCAmelCase__ : List[Any] = encoded_inputs[self.model_input_names[0]] # `global_attention_mask` need to have the same length as other (sequential) inputs. UpperCAmelCase__ : Any = len(encoded_inputs["global_attention_mask"] ) != len(snake_case__ ) if needs_to_be_padded: UpperCAmelCase__ : List[str] = len(snake_case__ ) - len(encoded_inputs["global_attention_mask"] ) if self.padding_side == "right": # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend` UpperCAmelCase__ : Dict = ( encoded_inputs["global_attention_mask"] + [-1] * difference ) elif self.padding_side == "left": UpperCAmelCase__ : Dict = [-1] * difference + encoded_inputs[ "global_attention_mask" ] else: raise ValueError("Invalid padding strategy:" + str(self.padding_side ) ) return encoded_inputs
438
1
import math from dataclasses import dataclass from typing import Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, randn_tensor from .scheduling_utils import SchedulerMixin @dataclass # Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP class lowerCamelCase ( _lowerCamelCase ): '''simple docstring''' UpperCamelCase__ =42 UpperCamelCase__ =None def lowercase__ ( __A: int ,__A: Optional[Any]=0.999 ,__A: int="cosine" ,): '''simple docstring''' if alpha_transform_type == "cosine": def alpha_bar_fn(__A: Tuple ): return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(__A: List[str] ): return math.exp(t * -12.0 ) else: raise ValueError(F'''Unsupported alpha_tranform_type: {alpha_transform_type}''' ) __magic_name__ : List[Any] = [] for i in range(__A ): __magic_name__ : Tuple = i / num_diffusion_timesteps __magic_name__ : int = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(__A ) / alpha_bar_fn(__A ) ,__A ) ) return torch.tensor(__A ,dtype=torch.floataa ) class lowerCamelCase ( _lowerCamelCase ,_lowerCamelCase ): '''simple docstring''' @register_to_config def __init__( self : Union[str, Any] , lowerCamelCase_ : int = 1000 , lowerCamelCase_ : str = "fixed_small_log" , lowerCamelCase_ : bool = True , lowerCamelCase_ : Optional[float] = 1.0 , lowerCamelCase_ : str = "epsilon" , lowerCamelCase_ : str = "squaredcos_cap_v2" , ) -> List[Any]: if beta_schedule != "squaredcos_cap_v2": raise ValueError('''UnCLIPScheduler only supports `beta_schedule`: \'squaredcos_cap_v2\'''' ) __magic_name__ : Optional[int] = betas_for_alpha_bar(lowerCamelCase_ ) __magic_name__ : Union[str, Any] = 1.0 - self.betas __magic_name__ : Optional[int] = torch.cumprod(self.alphas , dim=0 ) __magic_name__ : str = torch.tensor(1.0 ) # standard deviation of the initial noise distribution __magic_name__ : Tuple = 1.0 # setable values __magic_name__ : List[str] = None __magic_name__ : List[str] = torch.from_numpy(np.arange(0 , lowerCamelCase_ )[::-1].copy() ) __magic_name__ : str = variance_type def UpperCAmelCase__ ( self : Union[str, Any] , lowerCamelCase_ : torch.FloatTensor , lowerCamelCase_ : Optional[int] = None ) -> torch.FloatTensor: return sample def UpperCAmelCase__ ( self : Any , lowerCamelCase_ : int , lowerCamelCase_ : Union[str, torch.device] = None ) -> List[str]: __magic_name__ : int = num_inference_steps __magic_name__ : Any = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1) __magic_name__ : int = (np.arange(0 , lowerCamelCase_ ) * step_ratio).round()[::-1].copy().astype(np.intaa ) __magic_name__ : Tuple = torch.from_numpy(lowerCamelCase_ ).to(lowerCamelCase_ ) def UpperCAmelCase__ ( self : List[str] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : List[str]=None , lowerCamelCase_ : Any=None , lowerCamelCase_ : str=None ) -> Union[str, Any]: if prev_timestep is None: __magic_name__ : Dict = t - 1 __magic_name__ : Any = self.alphas_cumprod[t] __magic_name__ : List[Any] = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one __magic_name__ : Union[str, Any] = 1 - alpha_prod_t __magic_name__ : int = 1 - alpha_prod_t_prev if prev_timestep == t - 1: __magic_name__ : str = self.betas[t] else: __magic_name__ : str = 1 - alpha_prod_t / alpha_prod_t_prev # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf) # and sample from it to get previous sample # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample __magic_name__ : List[str] = beta_prod_t_prev / beta_prod_t * beta if variance_type is None: __magic_name__ : str = self.config.variance_type # hacks - were probably added for training stability if variance_type == "fixed_small_log": __magic_name__ : Any = torch.log(torch.clamp(lowerCamelCase_ , min=1E-20 ) ) __magic_name__ : List[Any] = torch.exp(0.5 * variance ) elif variance_type == "learned_range": # NOTE difference with DDPM scheduler __magic_name__ : Any = variance.log() __magic_name__ : Union[str, Any] = beta.log() __magic_name__ : Any = (predicted_variance + 1) / 2 __magic_name__ : str = frac * max_log + (1 - frac) * min_log return variance def UpperCAmelCase__ ( self : List[Any] , lowerCamelCase_ : torch.FloatTensor , lowerCamelCase_ : int , lowerCamelCase_ : torch.FloatTensor , lowerCamelCase_ : Optional[int] = None , lowerCamelCase_ : Optional[int]=None , lowerCamelCase_ : bool = True , ) -> Union[UnCLIPSchedulerOutput, Tuple]: __magic_name__ : List[str] = timestep if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range": __magic_name__ , __magic_name__ : List[Any] = torch.split(lowerCamelCase_ , sample.shape[1] , dim=1 ) else: __magic_name__ : Union[str, Any] = None # 1. compute alphas, betas if prev_timestep is None: __magic_name__ : Optional[Any] = t - 1 __magic_name__ : Optional[int] = self.alphas_cumprod[t] __magic_name__ : Dict = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one __magic_name__ : Union[str, Any] = 1 - alpha_prod_t __magic_name__ : Optional[int] = 1 - alpha_prod_t_prev if prev_timestep == t - 1: __magic_name__ : List[Any] = self.betas[t] __magic_name__ : Union[str, Any] = self.alphas[t] else: __magic_name__ : List[str] = 1 - alpha_prod_t / alpha_prod_t_prev __magic_name__ : List[Any] = 1 - beta # 2. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf if self.config.prediction_type == "epsilon": __magic_name__ : List[str] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 elif self.config.prediction_type == "sample": __magic_name__ : Union[str, Any] = model_output else: raise ValueError( F'''prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`''' ''' for the UnCLIPScheduler.''' ) # 3. Clip "predicted x_0" if self.config.clip_sample: __magic_name__ : Any = torch.clamp( lowerCamelCase_ , -self.config.clip_sample_range , self.config.clip_sample_range ) # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf __magic_name__ : Optional[Any] = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t __magic_name__ : Dict = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t # 5. Compute predicted previous sample µ_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf __magic_name__ : Optional[int] = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample # 6. Add noise __magic_name__ : List[Any] = 0 if t > 0: __magic_name__ : Tuple = randn_tensor( model_output.shape , dtype=model_output.dtype , generator=lowerCamelCase_ , device=model_output.device ) __magic_name__ : Optional[Any] = self._get_variance( lowerCamelCase_ , predicted_variance=lowerCamelCase_ , prev_timestep=lowerCamelCase_ , ) if self.variance_type == "fixed_small_log": __magic_name__ : List[Any] = variance elif self.variance_type == "learned_range": __magic_name__ : Any = (0.5 * variance).exp() else: raise ValueError( F'''variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`''' ''' for the UnCLIPScheduler.''' ) __magic_name__ : int = variance * variance_noise __magic_name__ : Tuple = pred_prev_sample + variance if not return_dict: return (pred_prev_sample,) return UnCLIPSchedulerOutput(prev_sample=lowerCamelCase_ , pred_original_sample=lowerCamelCase_ ) def UpperCAmelCase__ ( self : Optional[int] , lowerCamelCase_ : torch.FloatTensor , lowerCamelCase_ : torch.FloatTensor , lowerCamelCase_ : torch.IntTensor , ) -> torch.FloatTensor: # Make sure alphas_cumprod and timestep have same device and dtype as original_samples __magic_name__ : Optional[Any] = self.alphas_cumprod.to(device=original_samples.device , dtype=original_samples.dtype ) __magic_name__ : Union[str, Any] = timesteps.to(original_samples.device ) __magic_name__ : str = alphas_cumprod[timesteps] ** 0.5 __magic_name__ : Any = sqrt_alpha_prod.flatten() while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ): __magic_name__ : Any = sqrt_alpha_prod.unsqueeze(-1 ) __magic_name__ : List[str] = (1 - alphas_cumprod[timesteps]) ** 0.5 __magic_name__ : List[str] = sqrt_one_minus_alpha_prod.flatten() while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ): __magic_name__ : Union[str, Any] = sqrt_one_minus_alpha_prod.unsqueeze(-1 ) __magic_name__ : Dict = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise return noisy_samples
501
import re from typing import Callable, List, Optional, Union import tensorflow as tf try: from tensorflow.keras.optimizers.legacy import Adam except ImportError: from tensorflow.keras.optimizers import Adam class lowerCamelCase ( tf.keras.optimizers.schedules.LearningRateSchedule ): '''simple docstring''' def __init__( self : Union[str, Any] , lowerCamelCase_ : float , lowerCamelCase_ : Callable , lowerCamelCase_ : int , lowerCamelCase_ : float = 1.0 , lowerCamelCase_ : str = None , ) -> Optional[int]: super().__init__() __magic_name__ : Union[str, Any] = initial_learning_rate __magic_name__ : Optional[int] = warmup_steps __magic_name__ : Union[str, Any] = power __magic_name__ : List[Any] = decay_schedule_fn __magic_name__ : Dict = name def __call__( self : Any , lowerCamelCase_ : Dict ) -> Optional[Any]: with tf.name_scope(self.name or '''WarmUp''' ) as name: # Implements polynomial warmup. i.e., if global_step < warmup_steps, the # learning rate will be `global_step/num_warmup_steps * init_lr`. __magic_name__ : Union[str, Any] = tf.cast(lowerCamelCase_ , tf.floataa ) __magic_name__ : Dict = tf.cast(self.warmup_steps , tf.floataa ) __magic_name__ : Dict = global_step_float / warmup_steps_float __magic_name__ : List[str] = self.initial_learning_rate * tf.math.pow(lowerCamelCase_ , self.power ) return tf.cond( global_step_float < warmup_steps_float , lambda: warmup_learning_rate , lambda: self.decay_schedule_fn(step - self.warmup_steps ) , name=lowerCamelCase_ , ) def UpperCAmelCase__ ( self : Optional[int] ) -> str: return { "initial_learning_rate": self.initial_learning_rate, "decay_schedule_fn": self.decay_schedule_fn, "warmup_steps": self.warmup_steps, "power": self.power, "name": self.name, } def lowercase__ ( __A: float ,__A: int ,__A: int ,__A: float = 0.0 ,__A: float = 0.9 ,__A: float = 0.999 ,__A: float = 1e-8 ,__A: Optional[float] = None ,__A: Optional[float] = None ,__A: float = 0.0 ,__A: float = 1.0 ,__A: Optional[List[str]] = None ,): '''simple docstring''' __magic_name__ : Any = tf.keras.optimizers.schedules.PolynomialDecay( initial_learning_rate=__A ,decay_steps=num_train_steps - num_warmup_steps ,end_learning_rate=init_lr * min_lr_ratio ,power=__A ,) if num_warmup_steps: __magic_name__ : Tuple = WarmUp( initial_learning_rate=__A ,decay_schedule_fn=__A ,warmup_steps=__A ,) if weight_decay_rate > 0.0: __magic_name__ : Union[str, Any] = AdamWeightDecay( learning_rate=__A ,weight_decay_rate=__A ,beta_a=__A ,beta_a=__A ,epsilon=__A ,clipnorm=__A ,global_clipnorm=__A ,exclude_from_weight_decay=['''LayerNorm''', '''layer_norm''', '''bias'''] ,include_in_weight_decay=__A ,) else: __magic_name__ : str = tf.keras.optimizers.Adam( learning_rate=__A ,beta_a=__A ,beta_a=__A ,epsilon=__A ,clipnorm=__A ,global_clipnorm=__A ,) # We return the optimizer and the LR scheduler in order to better track the # evolution of the LR independently of the optimizer. return optimizer, lr_schedule class lowerCamelCase ( _lowerCamelCase ): '''simple docstring''' def __init__( self : Optional[int] , lowerCamelCase_ : Union[float, tf.keras.optimizers.schedules.LearningRateSchedule] = 0.0_0_1 , lowerCamelCase_ : float = 0.9 , lowerCamelCase_ : float = 0.9_9_9 , lowerCamelCase_ : float = 1E-7 , lowerCamelCase_ : bool = False , lowerCamelCase_ : float = 0.0 , lowerCamelCase_ : Optional[List[str]] = None , lowerCamelCase_ : Optional[List[str]] = None , lowerCamelCase_ : str = "AdamWeightDecay" , **lowerCamelCase_ : int , ) -> List[str]: super().__init__(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ ) __magic_name__ : int = weight_decay_rate __magic_name__ : Tuple = include_in_weight_decay __magic_name__ : Union[str, Any] = exclude_from_weight_decay @classmethod def UpperCAmelCase__ ( cls : int , lowerCamelCase_ : int ) -> Optional[Any]: __magic_name__ : Tuple = {'''WarmUp''': WarmUp} return super(lowerCamelCase_ , cls ).from_config(lowerCamelCase_ , custom_objects=lowerCamelCase_ ) def UpperCAmelCase__ ( self : Union[str, Any] , lowerCamelCase_ : int , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Optional[int] ) -> int: super(lowerCamelCase_ , self )._prepare_local(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) __magic_name__ : Any = tf.constant( self.weight_decay_rate , name='''adam_weight_decay_rate''' ) def UpperCAmelCase__ ( self : Optional[int] , lowerCamelCase_ : List[str] , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Any ) -> Optional[Any]: __magic_name__ : Optional[int] = self._do_use_weight_decay(var.name ) if do_decay: return var.assign_sub( learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]['''weight_decay_rate'''] , use_locking=self._use_locking , ) return tf.no_op() def UpperCAmelCase__ ( self : str , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : int=None , **lowerCamelCase_ : Union[str, Any] ) -> Optional[int]: __magic_name__ , __magic_name__ : Dict = list(zip(*lowerCamelCase_ ) ) return super(lowerCamelCase_ , self ).apply_gradients(zip(lowerCamelCase_ , lowerCamelCase_ ) , name=lowerCamelCase_ , **lowerCamelCase_ ) def UpperCAmelCase__ ( self : List[Any] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : int , lowerCamelCase_ : Union[str, Any] ) -> Union[str, Any]: if apply_state is None: return self._decayed_lr_t[var_dtype], {} __magic_name__ : str = apply_state or {} __magic_name__ : Optional[Any] = apply_state.get((var_device, var_dtype) ) if coefficients is None: __magic_name__ : List[str] = self._fallback_apply_state(lowerCamelCase_ , lowerCamelCase_ ) __magic_name__ : List[Any] = coefficients return coefficients["lr_t"], {"apply_state": apply_state} def UpperCAmelCase__ ( self : List[str] , lowerCamelCase_ : List[str] , lowerCamelCase_ : List[str] , lowerCamelCase_ : Dict=None ) -> Optional[Any]: __magic_name__ , __magic_name__ : Union[str, Any] = self._get_lr(var.device , var.dtype.base_dtype , lowerCamelCase_ ) __magic_name__ : Optional[Any] = self._decay_weights_op(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) with tf.control_dependencies([decay] ): return super(lowerCamelCase_ , self )._resource_apply_dense(lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ ) def UpperCAmelCase__ ( self : Dict , lowerCamelCase_ : List[Any] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Union[str, Any]=None ) -> Any: __magic_name__ , __magic_name__ : Tuple = self._get_lr(var.device , var.dtype.base_dtype , lowerCamelCase_ ) __magic_name__ : Optional[int] = self._decay_weights_op(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) with tf.control_dependencies([decay] ): return super(lowerCamelCase_ , self )._resource_apply_sparse(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ ) def UpperCAmelCase__ ( self : int ) -> Dict: __magic_name__ : List[Any] = super().get_config() config.update({'''weight_decay_rate''': self.weight_decay_rate} ) return config def UpperCAmelCase__ ( self : Dict , lowerCamelCase_ : Union[str, Any] ) -> Union[str, Any]: if self.weight_decay_rate == 0: return False if self._include_in_weight_decay: for r in self._include_in_weight_decay: if re.search(lowerCamelCase_ , lowerCamelCase_ ) is not None: return True if self._exclude_from_weight_decay: for r in self._exclude_from_weight_decay: if re.search(lowerCamelCase_ , lowerCamelCase_ ) is not None: return False return True class lowerCamelCase ( _lowerCamelCase ): '''simple docstring''' def __init__( self : Optional[Any] ) -> Any: __magic_name__ : Optional[int] = [] __magic_name__ : Dict = None @property def UpperCAmelCase__ ( self : Optional[int] ) -> Any: if self._accum_steps is None: __magic_name__ : Optional[Any] = tf.Variable( tf.constant(0 , dtype=tf.intaa ) , trainable=lowerCamelCase_ , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , ) return self._accum_steps.value() @property def UpperCAmelCase__ ( self : Optional[Any] ) -> Optional[Any]: if not self._gradients: raise ValueError('''The accumulator should be called first to initialize the gradients''' ) return [gradient.value() if gradient is not None else gradient for gradient in self._gradients] def __call__( self : Optional[int] , lowerCamelCase_ : Any ) -> List[str]: if not self._gradients: __magic_name__ : int = self.step # Create the step variable. self._gradients.extend( [ tf.Variable( tf.zeros_like(lowerCamelCase_ ) , trainable=lowerCamelCase_ , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , ) if gradient is not None else gradient for gradient in gradients ] ) if len(lowerCamelCase_ ) != len(self._gradients ): raise ValueError(F'''Expected {len(self._gradients )} gradients, but got {len(lowerCamelCase_ )}''' ) for accum_gradient, gradient in zip(self._gradients , lowerCamelCase_ ): if accum_gradient is not None and gradient is not None: accum_gradient.assign_add(lowerCamelCase_ ) self._accum_steps.assign_add(1 ) def UpperCAmelCase__ ( self : str ) -> Optional[int]: if not self._gradients: return self._accum_steps.assign(0 ) for gradient in self._gradients: if gradient is not None: gradient.assign(tf.zeros_like(lowerCamelCase_ ) )
501
1
import unittest import numpy as np import requests from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11 else: A_ = False if is_vision_available(): from PIL import Image from transformers import PixaStructImageProcessor class __lowercase ( unittest.TestCase ): def __init__( self : Tuple , __lowerCamelCase : List[str] , __lowerCamelCase : Dict=7 , __lowerCamelCase : str=3 , __lowerCamelCase : Tuple=18 , __lowerCamelCase : int=30 , __lowerCamelCase : Optional[Any]=4_00 , __lowerCamelCase : int=None , __lowerCamelCase : Optional[Any]=True , __lowerCamelCase : Tuple=True , __lowerCamelCase : Optional[int]=None , ) -> Union[str, Any]: '''simple docstring''' lowercase = size if size is not None else {'''height''': 20, '''width''': 20} lowercase = parent lowercase = batch_size lowercase = num_channels lowercase = image_size lowercase = min_resolution lowercase = max_resolution lowercase = size lowercase = do_normalize lowercase = do_convert_rgb lowercase = [5_12, 10_24, 20_48, 40_96] lowercase = patch_size if patch_size is not None else {'''height''': 16, '''width''': 16} def __a ( self : Dict ) -> Dict: '''simple docstring''' return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb} def __a ( self : Optional[Any] ) -> Optional[int]: '''simple docstring''' lowercase = '''https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg''' lowercase = Image.open(requests.get(__lowerCamelCase , stream=__lowerCamelCase ).raw ).convert('''RGB''' ) return raw_image @unittest.skipIf( not is_torch_greater_or_equal_than_1_11 , reason='`Pix2StructImageProcessor` requires `torch>=1.11.0`.' , ) @require_torch @require_vision class __lowercase ( _A , unittest.TestCase ): lowercase = PixaStructImageProcessor if is_vision_available() else None def __a ( self : Union[str, Any] ) -> List[Any]: '''simple docstring''' lowercase = PixaStructImageProcessingTester(self ) @property def __a ( self : str ) -> str: '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def __a ( self : Any ) -> Any: '''simple docstring''' lowercase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__lowerCamelCase , '''do_normalize''' ) ) self.assertTrue(hasattr(__lowerCamelCase , '''do_convert_rgb''' ) ) def __a ( self : List[str] ) -> Optional[Any]: '''simple docstring''' lowercase = self.image_processor_tester.prepare_dummy_image() lowercase = self.image_processing_class(**self.image_processor_dict ) lowercase = 20_48 lowercase = image_processor(__lowerCamelCase , return_tensors='''pt''' , max_patches=__lowerCamelCase ) self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0606 ) , atol=1E-3 , rtol=1E-3 ) ) def __a ( self : Dict ) -> Optional[Any]: '''simple docstring''' lowercase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowercase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase ) for image in image_inputs: self.assertIsInstance(__lowerCamelCase , Image.Image ) # Test not batched input lowercase = ( (self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width''']) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input lowercase = image_processor( image_inputs[0] , return_tensors='''pt''' , max_patches=__lowerCamelCase ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched lowercase = image_processor( __lowerCamelCase , return_tensors='''pt''' , max_patches=__lowerCamelCase ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) def __a ( self : List[str] ) -> Optional[int]: '''simple docstring''' lowercase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowercase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase ) for image in image_inputs: self.assertIsInstance(__lowerCamelCase , Image.Image ) # Test not batched input lowercase = ( (self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width''']) * self.image_processor_tester.num_channels ) + 2 lowercase = True for max_patch in self.image_processor_tester.max_patches: # Test not batched input with self.assertRaises(__lowerCamelCase ): lowercase = image_processor( image_inputs[0] , return_tensors='''pt''' , max_patches=__lowerCamelCase ).flattened_patches lowercase = '''Hello''' lowercase = image_processor( image_inputs[0] , return_tensors='''pt''' , max_patches=__lowerCamelCase , header_text=__lowerCamelCase ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched lowercase = image_processor( __lowerCamelCase , return_tensors='''pt''' , max_patches=__lowerCamelCase , header_text=__lowerCamelCase ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) def __a ( self : Dict ) -> Union[str, Any]: '''simple docstring''' lowercase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowercase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , numpify=__lowerCamelCase ) for image in image_inputs: self.assertIsInstance(__lowerCamelCase , np.ndarray ) lowercase = ( (self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width''']) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input lowercase = image_processor( image_inputs[0] , return_tensors='''pt''' , max_patches=__lowerCamelCase ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched lowercase = image_processor( __lowerCamelCase , return_tensors='''pt''' , max_patches=__lowerCamelCase ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) def __a ( self : int ) -> Optional[int]: '''simple docstring''' lowercase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowercase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , torchify=__lowerCamelCase ) for image in image_inputs: self.assertIsInstance(__lowerCamelCase , torch.Tensor ) # Test not batched input lowercase = ( (self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width''']) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input lowercase = image_processor( image_inputs[0] , return_tensors='''pt''' , max_patches=__lowerCamelCase ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched lowercase = image_processor( __lowerCamelCase , return_tensors='''pt''' , max_patches=__lowerCamelCase ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) @unittest.skipIf( not is_torch_greater_or_equal_than_1_11 , reason='`Pix2StructImageProcessor` requires `torch>=1.11.0`.' , ) @require_torch @require_vision class __lowercase ( _A , unittest.TestCase ): lowercase = PixaStructImageProcessor if is_vision_available() else None def __a ( self : int ) -> int: '''simple docstring''' lowercase = PixaStructImageProcessingTester(self , num_channels=4 ) lowercase = 3 @property def __a ( self : Tuple ) -> Union[str, Any]: '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def __a ( self : Optional[int] ) -> Optional[int]: '''simple docstring''' lowercase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__lowerCamelCase , '''do_normalize''' ) ) self.assertTrue(hasattr(__lowerCamelCase , '''do_convert_rgb''' ) ) def __a ( self : List[str] ) -> Any: '''simple docstring''' lowercase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowercase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase ) for image in image_inputs: self.assertIsInstance(__lowerCamelCase , Image.Image ) # Test not batched input lowercase = ( (self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width''']) * (self.image_processor_tester.num_channels - 1) ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input lowercase = image_processor( image_inputs[0] , return_tensors='''pt''' , max_patches=__lowerCamelCase ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched lowercase = image_processor( __lowerCamelCase , return_tensors='''pt''' , max_patches=__lowerCamelCase ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
604
import os import re from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging A_ = logging.get_logger(__name__) A_ = { "vocab_file": "vocab.txt", "merges_file": "bpe.codes", } A_ = { "vocab_file": { "vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt", "vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt", }, "merges_file": { "vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes", "vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes", }, } A_ = { "vinai/phobert-base": 256, "vinai/phobert-large": 256, } def __UpperCAmelCase ( UpperCAmelCase )-> Optional[Any]: """simple docstring""" lowercase = set() lowercase = word[0] for char in word[1:]: pairs.add((prev_char, char) ) lowercase = char lowercase = set(UpperCAmelCase ) return pairs class __lowercase ( _A ): lowercase = VOCAB_FILES_NAMES lowercase = PRETRAINED_VOCAB_FILES_MAP lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self : Any , __lowerCamelCase : List[Any] , __lowerCamelCase : List[str] , __lowerCamelCase : List[str]="<s>" , __lowerCamelCase : Union[str, Any]="</s>" , __lowerCamelCase : int="</s>" , __lowerCamelCase : Dict="<s>" , __lowerCamelCase : int="<unk>" , __lowerCamelCase : Optional[int]="<pad>" , __lowerCamelCase : Any="<mask>" , **__lowerCamelCase : int , ) -> Any: '''simple docstring''' super().__init__( bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , **__lowerCamelCase , ) lowercase = vocab_file lowercase = merges_file lowercase = {} lowercase = 0 lowercase = 1 lowercase = 2 lowercase = 3 self.add_from_file(__lowerCamelCase ) lowercase = {v: k for k, v in self.encoder.items()} with open(__lowerCamelCase , encoding='''utf-8''' ) as merges_handle: lowercase = merges_handle.read().split('''\n''' )[:-1] lowercase = [tuple(merge.split()[:-1] ) for merge in merges] lowercase = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) ) lowercase = {} def __a ( self : Any , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ) -> List[int]: '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] lowercase = [self.cls_token_id] lowercase = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def __a ( self : Dict , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None , __lowerCamelCase : bool = False ) -> List[int]: '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__lowerCamelCase , token_ids_a=__lowerCamelCase , already_has_special_tokens=__lowerCamelCase ) if token_ids_a is None: return [1] + ([0] * len(__lowerCamelCase )) + [1] return [1] + ([0] * len(__lowerCamelCase )) + [1, 1] + ([0] * len(__lowerCamelCase )) + [1] def __a ( self : List[Any] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ) -> List[int]: '''simple docstring''' lowercase = [self.sep_token_id] lowercase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def __a ( self : int ) -> str: '''simple docstring''' return len(self.encoder ) def __a ( self : int ) -> Any: '''simple docstring''' return dict(self.encoder , **self.added_tokens_encoder ) def __a ( self : int , __lowerCamelCase : Any ) -> Optional[int]: '''simple docstring''' if token in self.cache: return self.cache[token] lowercase = tuple(__lowerCamelCase ) lowercase = tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] ) lowercase = get_pairs(__lowerCamelCase ) if not pairs: return token while True: lowercase = min(__lowerCamelCase , key=lambda __lowerCamelCase : self.bpe_ranks.get(__lowerCamelCase , float('''inf''' ) ) ) if bigram not in self.bpe_ranks: break lowercase ,lowercase = bigram lowercase = [] lowercase = 0 while i < len(__lowerCamelCase ): try: lowercase = word.index(__lowerCamelCase , __lowerCamelCase ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) lowercase = j if word[i] == first and i < len(__lowerCamelCase ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 lowercase = tuple(__lowerCamelCase ) lowercase = new_word if len(__lowerCamelCase ) == 1: break else: lowercase = get_pairs(__lowerCamelCase ) lowercase = '''@@ '''.join(__lowerCamelCase ) lowercase = word[:-4] lowercase = word return word def __a ( self : List[str] , __lowerCamelCase : Tuple ) -> List[Any]: '''simple docstring''' lowercase = [] lowercase = re.findall(r'''\S+\n?''' , __lowerCamelCase ) for token in words: split_tokens.extend(list(self.bpe(__lowerCamelCase ).split(''' ''' ) ) ) return split_tokens def __a ( self : Tuple , __lowerCamelCase : List[Any] ) -> Any: '''simple docstring''' return self.encoder.get(__lowerCamelCase , self.encoder.get(self.unk_token ) ) def __a ( self : str , __lowerCamelCase : List[str] ) -> Union[str, Any]: '''simple docstring''' return self.decoder.get(__lowerCamelCase , self.unk_token ) def __a ( self : Optional[Any] , __lowerCamelCase : Any ) -> List[str]: '''simple docstring''' lowercase = ''' '''.join(__lowerCamelCase ).replace('''@@ ''' , '''''' ).strip() return out_string def __a ( self : Optional[int] , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ) -> Tuple[str]: '''simple docstring''' if not os.path.isdir(__lowerCamelCase ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return lowercase = os.path.join( __lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) lowercase = os.path.join( __lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCamelCase ): copyfile(self.vocab_file , __lowerCamelCase ) if os.path.abspath(self.merges_file ) != os.path.abspath(__lowerCamelCase ): copyfile(self.merges_file , __lowerCamelCase ) return out_vocab_file, out_merge_file def __a ( self : str , __lowerCamelCase : List[str] ) -> List[str]: '''simple docstring''' if isinstance(__lowerCamelCase , __lowerCamelCase ): try: with open(__lowerCamelCase , '''r''' , encoding='''utf-8''' ) as fd: self.add_from_file(__lowerCamelCase ) except FileNotFoundError as fnfe: raise fnfe except UnicodeError: raise Exception(f'Incorrect encoding detected in {f}, please rebuild the dataset' ) return lowercase = f.readlines() for lineTmp in lines: lowercase = lineTmp.strip() lowercase = line.rfind(''' ''' ) if idx == -1: raise ValueError('''Incorrect dictionary format, expected \'<token> <cnt>\'''' ) lowercase = line[:idx] lowercase = len(self.encoder )
604
1
'''simple docstring''' from collections import UserDict from typing import Union import numpy as np import requests from ..utils import ( add_end_docstrings, logging, ) from .audio_classification import ffmpeg_read from .base import PIPELINE_INIT_ARGS, Pipeline lowerCAmelCase = logging.get_logger(__name__) @add_end_docstrings(_A ) class lowerCamelCase ( _A ): def __init__( self , **a_ ): super().__init__(**a_ ) if self.framework != "pt": raise ValueError(F'''The {self.__class__} is only available in PyTorch.''' ) # No specific FOR_XXX available yet def __call__( self , a_ , **a_ ): return super().__call__(a_ , **a_ ) def _lowerCamelCase ( self , **a_ ): lowerCAmelCase : List[Any] = {} if "candidate_labels" in kwargs: lowerCAmelCase : int = kwargs["candidate_labels"] if "hypothesis_template" in kwargs: lowerCAmelCase : Optional[Any] = kwargs["hypothesis_template"] return preprocess_params, {}, {} def _lowerCamelCase ( self , a_ , a_=None , a_="This is a sound of {}." ): if isinstance(a_ , a_ ): if audio.startswith("http://" ) or audio.startswith("https://" ): # We need to actually check for a real protocol, otherwise it's impossible to use a local file # like http_huggingface_co.png lowerCAmelCase : Optional[int] = requests.get(a_ ).content else: with open(a_ , "rb" ) as f: lowerCAmelCase : Optional[int] = f.read() if isinstance(a_ , a_ ): lowerCAmelCase : Optional[Any] = ffmpeg_read(a_ , self.feature_extractor.sampling_rate ) if not isinstance(a_ , np.ndarray ): raise ValueError("We expect a numpy ndarray as input" ) if len(audio.shape ) != 1: raise ValueError("We expect a single channel audio input for ZeroShotAudioClassificationPipeline" ) lowerCAmelCase : Dict = self.feature_extractor( [audio] , sampling_rate=self.feature_extractor.sampling_rate , return_tensors="pt" ) lowerCAmelCase : List[Any] = candidate_labels lowerCAmelCase : Optional[int] = [hypothesis_template.format(a_ ) for x in candidate_labels] lowerCAmelCase : str = self.tokenizer(a_ , return_tensors=self.framework , padding=a_ ) lowerCAmelCase : List[str] = [text_inputs] return inputs def _lowerCamelCase ( self , a_ ): lowerCAmelCase : str = model_inputs.pop("candidate_labels" ) lowerCAmelCase : int = model_inputs.pop("text_inputs" ) if isinstance(text_inputs[0] , a_ ): lowerCAmelCase : str = text_inputs[0] else: # Batching case. lowerCAmelCase : Optional[Any] = text_inputs[0][0] lowerCAmelCase : Tuple = self.model(**a_ , **a_ ) lowerCAmelCase : Any = { "candidate_labels": candidate_labels, "logits": outputs.logits_per_audio, } return model_outputs def _lowerCamelCase ( self , a_ ): lowerCAmelCase : List[str] = model_outputs.pop("candidate_labels" ) lowerCAmelCase : List[Any] = model_outputs["logits"][0] if self.framework == "pt": lowerCAmelCase : Optional[Any] = logits.softmax(dim=0 ) lowerCAmelCase : int = probs.tolist() else: raise ValueError("`tf` framework not supported." ) lowerCAmelCase : Optional[Any] = [ {"score": score, "label": candidate_label} for score, candidate_label in sorted(zip(a_ , a_ ) , key=lambda a_ : -x[0] ) ] return result
551
'''simple docstring''' import warnings from ...utils import logging from .image_processing_chinese_clip import ChineseCLIPImageProcessor lowerCAmelCase = logging.get_logger(__name__) class lowerCamelCase ( _A ): def __init__( self , *a_ , **a_ ): warnings.warn( "The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers." " Please use ChineseCLIPImageProcessor instead." , a_ , ) super().__init__(*a_ , **a_ )
551
1
"""simple docstring""" import re import time from typing import Optional import IPython.display as disp from ..trainer_callback import TrainerCallback from ..trainer_utils import IntervalStrategy, has_length def lowerCamelCase__ ( UpperCAmelCase_ )-> int: """simple docstring""" UpperCamelCase = int(__A ) UpperCamelCase , UpperCamelCase , UpperCamelCase = t // 36_00, (t // 60) % 60, t % 60 return F"{h}:{m:02d}:{s:02d}" if h != 0 else F"{m:02d}:{s:02d}" def lowerCamelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_=3_00 )-> List[str]: """simple docstring""" # docstyle-ignore return F"\n <div>\n {prefix}\n <progress value=\'{value}\' max=\'{total}\' style=\'width:{width}px; height:20px; vertical-align: middle;\'></progress>\n {label}\n </div>\n " def lowerCamelCase__ ( UpperCAmelCase_ )-> Tuple: """simple docstring""" UpperCamelCase = "<table border=\"1\" class=\"dataframe\">\n" html_code += """ <thead>\n <tr style="text-align: left;">\n""" for i in items[0]: html_code += F" <th>{i}</th>\n" html_code += " </tr>\n </thead>\n <tbody>\n" for line in items[1:]: html_code += " <tr>\n" for elt in line: UpperCamelCase = F"{elt:.6f}" if isinstance(__A , __A ) else str(__A ) html_code += F" <td>{elt}</td>\n" html_code += " </tr>\n" html_code += " </tbody>\n</table><p>" return html_code class __a : UpperCamelCase_ : Tuple = 5 UpperCamelCase_ : Tuple = 0.2 def __init__( self : List[str] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[Any] = None , UpperCAmelCase_ : Any = True , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : Optional[int] = 300 , )-> str: """simple docstring""" UpperCamelCase = total UpperCamelCase = "" if prefix is None else prefix UpperCamelCase = leave UpperCamelCase = parent UpperCamelCase = width UpperCamelCase = None UpperCamelCase = None UpperCamelCase = None def _SCREAMING_SNAKE_CASE ( self : int , UpperCAmelCase_ : str , UpperCAmelCase_ : List[Any] = False , UpperCAmelCase_ : int = None )-> Dict: """simple docstring""" UpperCamelCase = value if comment is not None: UpperCamelCase = comment if self.last_value is None: UpperCamelCase = UpperCamelCase = time.time() UpperCamelCase = UpperCamelCase = value UpperCamelCase = UpperCamelCase = None UpperCamelCase = self.warmup UpperCamelCase = 1 self.update_bar(lowercase__ ) elif value <= self.last_value and not force_update: return elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for , self.total ): if self.first_calls > 0: self.first_calls -= 1 UpperCamelCase = time.time() UpperCamelCase = current_time - self.start_time # We could have value = self.start_value if the update is called twixe with the same start value. if value > self.start_value: UpperCamelCase = self.elapsed_time / (value - self.start_value) else: UpperCamelCase = None if value >= self.total: UpperCamelCase = self.total UpperCamelCase = None if not self.leave: self.close() elif self.average_time_per_item is not None: UpperCamelCase = self.average_time_per_item * (self.total - value) self.update_bar(lowercase__ ) UpperCamelCase = value UpperCamelCase = current_time if self.average_time_per_item is None: UpperCamelCase = 1 else: UpperCamelCase = max(int(self.update_every / self.average_time_per_item ) , 1 ) def _SCREAMING_SNAKE_CASE ( self : Any , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : int=None )-> Optional[Any]: """simple docstring""" UpperCamelCase = " " * (len(str(self.total ) ) - len(str(lowercase__ ) )) + str(lowercase__ ) if self.elapsed_time is None: UpperCamelCase = f"[{spaced_value}/{self.total} : < :" elif self.predicted_remaining is None: UpperCamelCase = f"[{spaced_value}/{self.total} {format_time(self.elapsed_time )}" else: UpperCamelCase = ( f"[{spaced_value}/{self.total} {format_time(self.elapsed_time )} <" f" {format_time(self.predicted_remaining )}" ) self.label += f", {1/self.average_time_per_item:.2f} it/s" self.label += "]" if self.comment is None or len(self.comment ) == 0 else f", {self.comment}]" self.display() def _SCREAMING_SNAKE_CASE ( self : Optional[Any] )-> Dict: """simple docstring""" UpperCamelCase = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width ) if self.parent is not None: # If this is a child bar, the parent will take care of the display. self.parent.display() return if self.output is None: UpperCamelCase = disp.display(disp.HTML(self.html_code ) , display_id=lowercase__ ) else: self.output.update(disp.HTML(self.html_code ) ) def _SCREAMING_SNAKE_CASE ( self : List[Any] )-> List[str]: """simple docstring""" if self.parent is None and self.output is not None: self.output.update(disp.HTML("" ) ) class __a ( _lowerCAmelCase ): def __init__( self : int , UpperCAmelCase_ : Dict , UpperCAmelCase_ : str=None )-> int: """simple docstring""" super().__init__(lowercase__ ) UpperCamelCase = None if column_names is None else [column_names] UpperCamelCase = None def _SCREAMING_SNAKE_CASE ( self : Dict )-> int: """simple docstring""" UpperCamelCase = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width ) if self.inner_table is not None: self.html_code += text_to_html_table(self.inner_table ) if self.child_bar is not None: self.html_code += self.child_bar.html_code if self.output is None: UpperCamelCase = disp.display(disp.HTML(self.html_code ) , display_id=lowercase__ ) else: self.output.update(disp.HTML(self.html_code ) ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , UpperCAmelCase_ : int )-> Optional[Any]: """simple docstring""" if self.inner_table is None: UpperCamelCase = [list(values.keys() ), list(values.values() )] else: UpperCamelCase = self.inner_table[0] if len(self.inner_table ) == 1: # We give a chance to update the column names at the first iteration for key in values.keys(): if key not in columns: columns.append(lowercase__ ) UpperCamelCase = columns self.inner_table.append([values[c] for c in columns] ) def _SCREAMING_SNAKE_CASE ( self : List[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Tuple=None , UpperCAmelCase_ : List[Any]=300 )-> Union[str, Any]: """simple docstring""" UpperCamelCase = NotebookProgressBar(lowercase__ , prefix=lowercase__ , parent=self , width=lowercase__ ) return self.child_bar def _SCREAMING_SNAKE_CASE ( self : List[str] )-> List[Any]: """simple docstring""" UpperCamelCase = None self.display() class __a ( _lowerCAmelCase ): def __init__( self : List[Any] )-> str: """simple docstring""" UpperCamelCase = None UpperCamelCase = None UpperCamelCase = False def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : List[str] )-> Union[str, Any]: """simple docstring""" UpperCamelCase = "Epoch" if args.evaluation_strategy == IntervalStrategy.EPOCH else "Step" UpperCamelCase = 0 UpperCamelCase = 0 UpperCamelCase = [self.first_column] + ["Training Loss"] if args.evaluation_strategy != IntervalStrategy.NO: column_names.append("Validation Loss" ) UpperCamelCase = NotebookTrainingTracker(state.max_steps , lowercase__ ) def _SCREAMING_SNAKE_CASE ( self : List[str] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : Optional[int] )-> List[str]: """simple docstring""" UpperCamelCase = int(state.epoch ) if int(state.epoch ) == state.epoch else f"{state.epoch:.2f}" self.training_tracker.update( state.global_step + 1 , comment=f"Epoch {epoch}/{state.num_train_epochs}" , force_update=self._force_next_update , ) UpperCamelCase = False def _SCREAMING_SNAKE_CASE ( self : Optional[int] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Union[str, Any]=None , **UpperCAmelCase_ : Optional[Any] )-> Tuple: """simple docstring""" if not has_length(lowercase__ ): return if self.prediction_bar is None: if self.training_tracker is not None: UpperCamelCase = self.training_tracker.add_child(len(lowercase__ ) ) else: UpperCamelCase = NotebookProgressBar(len(lowercase__ ) ) self.prediction_bar.update(1 ) else: self.prediction_bar.update(self.prediction_bar.value + 1 ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : str )-> List[str]: """simple docstring""" if self.prediction_bar is not None: self.prediction_bar.close() UpperCamelCase = None def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : str=None , **UpperCAmelCase_ : Union[str, Any] )-> int: """simple docstring""" if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs: UpperCamelCase = {"Training Loss": logs["loss"]} # First column is necessarily Step sine we're not in epoch eval strategy UpperCamelCase = state.global_step self.training_tracker.write_line(lowercase__ ) def _SCREAMING_SNAKE_CASE ( self : Any , UpperCAmelCase_ : int , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Tuple=None , **UpperCAmelCase_ : List[Any] )-> Union[str, Any]: """simple docstring""" if self.training_tracker is not None: UpperCamelCase = {"Training Loss": "No log", "Validation Loss": "No log"} for log in reversed(state.log_history ): if "loss" in log: UpperCamelCase = log["loss"] break if self.first_column == "Epoch": UpperCamelCase = int(state.epoch ) else: UpperCamelCase = state.global_step UpperCamelCase = "eval" for k in metrics: if k.endswith("_loss" ): UpperCamelCase = re.sub(r"\_loss$" , "" , lowercase__ ) UpperCamelCase = metrics.pop("total_flos" , lowercase__ ) UpperCamelCase = metrics.pop("epoch" , lowercase__ ) UpperCamelCase = metrics.pop(f"{metric_key_prefix}_runtime" , lowercase__ ) UpperCamelCase = metrics.pop(f"{metric_key_prefix}_samples_per_second" , lowercase__ ) UpperCamelCase = metrics.pop(f"{metric_key_prefix}_steps_per_second" , lowercase__ ) UpperCamelCase = metrics.pop(f"{metric_key_prefix}_jit_compilation_time" , lowercase__ ) for k, v in metrics.items(): if k == f"{metric_key_prefix}_loss": UpperCamelCase = v else: UpperCamelCase = k.split("_" ) UpperCamelCase = " ".join([part.capitalize() for part in splits[1:]] ) UpperCamelCase = v self.training_tracker.write_line(lowercase__ ) self.training_tracker.remove_child() UpperCamelCase = None # Evaluation takes a long time so we should force the next update. UpperCamelCase = True def _SCREAMING_SNAKE_CASE ( self : List[Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : int , **UpperCAmelCase_ : int )-> Dict: """simple docstring""" self.training_tracker.update( state.global_step , comment=f"Epoch {int(state.epoch )}/{state.num_train_epochs}" , force_update=lowercase__ ) UpperCamelCase = None
554
import gc import unittest import numpy as np import torch from diffusers import StableDiffusionKDiffusionPipeline from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu enable_full_determinism() @slow @require_torch_gpu class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' def __UpperCamelCase ( self ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def __UpperCamelCase ( self ): '''simple docstring''' __A =StableDiffusionKDiffusionPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''' ) __A =sd_pipe.to(lowercase__ ) sd_pipe.set_progress_bar_config(disable=lowercase__ ) sd_pipe.set_scheduler('''sample_euler''' ) __A ='''A painting of a squirrel eating a burger''' __A =torch.manual_seed(0 ) __A =sd_pipe([prompt] , generator=lowercase__ , guidance_scale=9.0 , num_inference_steps=2_0 , output_type='''np''' ) __A =output.images __A =image[0, -3:, -3:, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) __A =np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def __UpperCamelCase ( self ): '''simple docstring''' __A =StableDiffusionKDiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' ) __A =sd_pipe.to(lowercase__ ) sd_pipe.set_progress_bar_config(disable=lowercase__ ) sd_pipe.set_scheduler('''sample_euler''' ) __A ='''A painting of a squirrel eating a burger''' __A =torch.manual_seed(0 ) __A =sd_pipe([prompt] , generator=lowercase__ , guidance_scale=9.0 , num_inference_steps=2_0 , output_type='''np''' ) __A =output.images __A =image[0, -3:, -3:, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) __A =np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-1 def __UpperCamelCase ( self ): '''simple docstring''' __A =StableDiffusionKDiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' ) __A =sd_pipe.to(lowercase__ ) sd_pipe.set_progress_bar_config(disable=lowercase__ ) sd_pipe.set_scheduler('''sample_dpmpp_2m''' ) __A ='''A painting of a squirrel eating a burger''' __A =torch.manual_seed(0 ) __A =sd_pipe( [prompt] , generator=lowercase__ , guidance_scale=7.5 , num_inference_steps=1_5 , output_type='''np''' , use_karras_sigmas=lowercase__ , ) __A =output.images __A =image[0, -3:, -3:, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) __A =np.array( [0.1138_1689, 0.1211_2921, 0.138_9457, 0.1254_9606, 0.124_4964, 0.1083_1517, 0.1156_2866, 0.1086_7816, 0.1049_9048] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
184
0
import unittest from transformers import load_tool from transformers.utils import is_torch_available if is_torch_available(): import torch from transformers.testing_utils import require_torch from .test_tools_common import ToolTesterMixin @require_torch class UpperCAmelCase__( unittest.TestCase , lowerCamelCase ): '''simple docstring''' def UpperCAmelCase ( self : List[Any]) -> List[Any]: """simple docstring""" lowercase__ = load_tool('text-to-speech') self.tool.setup() def UpperCAmelCase ( self : Any) -> Dict: """simple docstring""" torch.manual_seed(0) lowercase__ = self.tool('hey') lowercase__ = result.to_raw() self.assertTrue( torch.allclose( resulting_tensor[:3] , torch.tensor([-0.0_00_59_66_66_88_32_11_58_29, -0.0_00_36_57_64_01_90_79_50_64, -0.00_01_34_39_50_27_99_88_34_85]) , )) def UpperCAmelCase ( self : Dict) -> Any: """simple docstring""" torch.manual_seed(0) lowercase__ = self.tool('hey') lowercase__ = result.to_raw() self.assertTrue( torch.allclose( resulting_tensor[:3] , torch.tensor([-0.0_00_59_66_66_88_32_11_58_29, -0.0_00_36_57_64_01_90_79_50_64, -0.00_01_34_39_50_27_99_88_34_85]) , ))
642
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) a__ : Any = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : Optional[int] = ["ReformerTokenizer"] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : Union[str, Any] = ["ReformerTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : Any = [ "REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "ReformerAttention", "ReformerForMaskedLM", "ReformerForQuestionAnswering", "ReformerForSequenceClassification", "ReformerLayer", "ReformerModel", "ReformerModelWithLMHead", "ReformerPreTrainedModel", ] if TYPE_CHECKING: from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_reformer import ReformerTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_reformer_fast import ReformerTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_reformer import ( REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ReformerAttention, ReformerForMaskedLM, ReformerForQuestionAnswering, ReformerForSequenceClassification, ReformerLayer, ReformerModel, ReformerModelWithLMHead, ReformerPreTrainedModel, ) else: import sys a__ : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
642
1
import itertools import os import random import tempfile import unittest import numpy as np from transformers import TvltFeatureExtractor, is_datasets_available from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_torch_available(): import torch if is_datasets_available(): from datasets import load_dataset snake_case : Optional[Any] = random.Random() def __lowerCamelCase ( UpperCAmelCase_ : int , UpperCAmelCase_ : str=1.0 , UpperCAmelCase_ : Any=None , UpperCAmelCase_ : str=None ): """simple docstring""" if rng is None: a :Any = global_rng a :Optional[int] = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values class _snake_case ( unittest.TestCase ): def __init__( self , _lowerCamelCase , _lowerCamelCase=7 , _lowerCamelCase=400 , _lowerCamelCase=2000 , _lowerCamelCase=2048 , _lowerCamelCase=128 , _lowerCamelCase=1 , _lowerCamelCase=512 , _lowerCamelCase=30 , _lowerCamelCase=4_4100 , ): a :Dict = parent a :Optional[int] = batch_size a :Tuple = min_seq_length a :Dict = max_seq_length a :Optional[int] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) a :Optional[int] = spectrogram_length a :int = feature_size a :Union[str, Any] = num_audio_channels a :int = hop_length a :Any = chunk_length a :Optional[Any] = sampling_rate def SCREAMING_SNAKE_CASE__ ( self ): return { "spectrogram_length": self.spectrogram_length, "feature_size": self.feature_size, "num_audio_channels": self.num_audio_channels, "hop_length": self.hop_length, "chunk_length": self.chunk_length, "sampling_rate": self.sampling_rate, } def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase=False , _lowerCamelCase=False ): def _flatten(_lowerCamelCase ): return list(itertools.chain(*_lowerCamelCase ) ) if equal_length: a :List[str] = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )] else: # make sure that inputs increase in size a :Any = [ floats_list((x, self.feature_size) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: a :str = [np.asarray(_lowerCamelCase ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class _snake_case ( _snake_case , unittest.TestCase ): SCREAMING_SNAKE_CASE__ = TvltFeatureExtractor def SCREAMING_SNAKE_CASE__ ( self ): a :str = TvltFeatureExtractionTester(self ) def SCREAMING_SNAKE_CASE__ ( self ): a :Optional[int] = self.feature_extraction_class(**self.feat_extract_dict ) self.assertTrue(hasattr(_lowerCamelCase , '''spectrogram_length''' ) ) self.assertTrue(hasattr(_lowerCamelCase , '''feature_size''' ) ) self.assertTrue(hasattr(_lowerCamelCase , '''num_audio_channels''' ) ) self.assertTrue(hasattr(_lowerCamelCase , '''hop_length''' ) ) self.assertTrue(hasattr(_lowerCamelCase , '''chunk_length''' ) ) self.assertTrue(hasattr(_lowerCamelCase , '''sampling_rate''' ) ) def SCREAMING_SNAKE_CASE__ ( self ): a :List[Any] = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: a :Tuple = feat_extract_first.save_pretrained(_lowerCamelCase )[0] check_json_file_has_correct_format(_lowerCamelCase ) a :Dict = self.feature_extraction_class.from_pretrained(_lowerCamelCase ) a :List[Any] = feat_extract_first.to_dict() a :Union[str, Any] = feat_extract_second.to_dict() a :Dict = dict_first.pop('''mel_filters''' ) a :Any = dict_second.pop('''mel_filters''' ) self.assertTrue(np.allclose(_lowerCamelCase , _lowerCamelCase ) ) self.assertEqual(_lowerCamelCase , _lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self ): a :Optional[Any] = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: a :List[Any] = os.path.join(_lowerCamelCase , '''feat_extract.json''' ) feat_extract_first.to_json_file(_lowerCamelCase ) a :Any = self.feature_extraction_class.from_json_file(_lowerCamelCase ) a :str = feat_extract_first.to_dict() a :str = feat_extract_second.to_dict() a :Optional[int] = dict_first.pop('''mel_filters''' ) a :Any = dict_second.pop('''mel_filters''' ) self.assertTrue(np.allclose(_lowerCamelCase , _lowerCamelCase ) ) self.assertEqual(_lowerCamelCase , _lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self ): # Initialize feature_extractor a :Optional[int] = self.feature_extraction_class(**self.feat_extract_dict ) # create three inputs of length 800, 1000, and 1200 a :str = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] a :Any = [np.asarray(_lowerCamelCase ) for speech_input in speech_inputs] # Test not batched input a :Dict = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' , sampling_rate=4_4100 ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) # Test batched a :Any = feature_extractor(_lowerCamelCase , return_tensors='''np''' , sampling_rate=4_4100 ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) # Test audio masking a :Tuple = feature_extractor( _lowerCamelCase , return_tensors='''np''' , sampling_rate=4_4100 , mask_audio=_lowerCamelCase ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) # Test 2-D numpy arrays are batched. a :Union[str, Any] = [floats_list((1, x) )[0] for x in (800, 800, 800)] a :Dict = np.asarray(_lowerCamelCase ) a :Tuple = feature_extractor(_lowerCamelCase , return_tensors='''np''' , sampling_rate=4_4100 ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase ): a :List[Any] = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' ) # automatic decoding with librispeech a :List[Any] = ds.sort('''id''' ).select(range(_lowerCamelCase ) )[:num_samples]['''audio'''] return [x["array"] for x in speech_samples] def SCREAMING_SNAKE_CASE__ ( self ): a :Union[str, Any] = self._load_datasamples(1 ) a :Any = TvltFeatureExtractor() a :Tuple = feature_extractor(_lowerCamelCase , return_tensors='''pt''' ).audio_values self.assertEquals(audio_values.shape , (1, 1, 192, 128) ) a :str = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]] ) self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , _lowerCamelCase , atol=1e-4 ) )
445
import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging snake_case : Optional[int] = logging.get_logger(__name__) snake_case : Optional[int] = { '''microsoft/unispeech-large-1500h-cv''': ( '''https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json''' ), # See all UniSpeech models at https://huggingface.co/models?filter=unispeech } class _snake_case ( _snake_case ): SCREAMING_SNAKE_CASE__ = 'unispeech' def __init__( self , _lowerCamelCase=32 , _lowerCamelCase=768 , _lowerCamelCase=12 , _lowerCamelCase=12 , _lowerCamelCase=3072 , _lowerCamelCase="gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=0.0 , _lowerCamelCase=0.0 , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=0.02 , _lowerCamelCase=1e-5 , _lowerCamelCase="group" , _lowerCamelCase="gelu" , _lowerCamelCase=(512, 512, 512, 512, 512, 512, 512) , _lowerCamelCase=(5, 2, 2, 2, 2, 2, 2) , _lowerCamelCase=(10, 3, 3, 3, 3, 2, 2) , _lowerCamelCase=False , _lowerCamelCase=128 , _lowerCamelCase=16 , _lowerCamelCase=False , _lowerCamelCase=True , _lowerCamelCase=0.05 , _lowerCamelCase=10 , _lowerCamelCase=2 , _lowerCamelCase=0.0 , _lowerCamelCase=10 , _lowerCamelCase=0 , _lowerCamelCase=320 , _lowerCamelCase=2 , _lowerCamelCase=0.1 , _lowerCamelCase=100 , _lowerCamelCase=256 , _lowerCamelCase=256 , _lowerCamelCase=0.1 , _lowerCamelCase="mean" , _lowerCamelCase=False , _lowerCamelCase=False , _lowerCamelCase=256 , _lowerCamelCase=80 , _lowerCamelCase=0 , _lowerCamelCase=1 , _lowerCamelCase=2 , _lowerCamelCase=0.5 , **_lowerCamelCase , ): super().__init__(**_lowerCamelCase , pad_token_id=_lowerCamelCase , bos_token_id=_lowerCamelCase , eos_token_id=_lowerCamelCase ) a :Any = hidden_size a :str = feat_extract_norm a :List[Any] = feat_extract_activation a :Tuple = list(_lowerCamelCase ) a :Any = list(_lowerCamelCase ) a :List[Any] = list(_lowerCamelCase ) a :Union[str, Any] = conv_bias a :str = num_conv_pos_embeddings a :str = num_conv_pos_embedding_groups a :Tuple = len(self.conv_dim ) a :int = num_hidden_layers a :Any = intermediate_size a :Optional[Any] = hidden_act a :Tuple = num_attention_heads a :Any = hidden_dropout a :Any = attention_dropout a :Optional[Any] = activation_dropout a :Optional[Any] = feat_proj_dropout a :Any = final_dropout a :int = layerdrop a :int = layer_norm_eps a :Dict = initializer_range a :Dict = num_ctc_classes a :Optional[Any] = vocab_size a :str = do_stable_layer_norm a :Tuple = use_weighted_layer_sum a :Any = classifier_proj_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( '''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` ==''' ''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =''' F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,''' F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 a :List[Any] = apply_spec_augment a :Any = mask_time_prob a :Union[str, Any] = mask_time_length a :str = mask_time_min_masks a :Tuple = mask_feature_prob a :Dict = mask_feature_length a :int = mask_feature_min_masks # parameters for pretraining with codevector quantized representations a :Union[str, Any] = num_codevectors_per_group a :Dict = num_codevector_groups a :List[Any] = contrastive_logits_temperature a :Union[str, Any] = feat_quantizer_dropout a :Optional[Any] = num_negatives a :Tuple = codevector_dim a :Optional[Any] = proj_codevector_dim a :Union[str, Any] = diversity_loss_weight # ctc loss a :List[Any] = ctc_loss_reduction a :Union[str, Any] = ctc_zero_infinity # pretraining loss a :int = replace_prob @property def SCREAMING_SNAKE_CASE__ ( self ): return functools.reduce(operator.mul , self.conv_stride , 1 )
445
1
import numpy as np def __lowerCAmelCase ( __lowerCamelCase : np.array ) -> np.array: return 1 / (1 + np.exp(-vector )) if __name__ == "__main__": import doctest doctest.testmod()
718
def __lowerCAmelCase ( __lowerCamelCase : list ) -> list: __lowerCAmelCase =False while is_sorted is False: # Until all the indices are traversed keep looping __lowerCAmelCase =True for i in range(0 , len(__lowerCamelCase ) - 1 , 2 ): # iterating over all even indices if input_list[i] > input_list[i + 1]: __lowerCAmelCase , __lowerCAmelCase =input_list[i + 1], input_list[i] # swapping if elements not in order __lowerCAmelCase =False for i in range(1 , len(__lowerCamelCase ) - 1 , 2 ): # iterating over all odd indices if input_list[i] > input_list[i + 1]: __lowerCAmelCase , __lowerCAmelCase =input_list[i + 1], input_list[i] # swapping if elements not in order __lowerCAmelCase =False return input_list if __name__ == "__main__": print('''Enter list to be sorted''') lowercase_ = [int(x) for x in input().split()] # inputing elements of the list in one line lowercase_ = odd_even_sort(input_list) print('''The sorted list is''') print(sorted_list)
456
0
'''simple docstring''' # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import warnings from typing import List from unittest.mock import Mock import torch from torch.utils.data import DataLoader, IterableDataset, TensorDataset from accelerate.accelerator import Accelerator from accelerate.utils.dataclasses import DistributedType class _a ( __lowerCAmelCase ): def __init__( self ,_SCREAMING_SNAKE_CASE ) -> Dict: _snake_case = data def __iter__( self ) -> List[str]: for element in self.data: yield element def __a ( _UpperCamelCase: Union[str, Any]=True ) -> Dict: """simple docstring""" _snake_case = Accelerator(even_batches=_UpperCamelCase ) assert accelerator.num_processes == 2, "this script expects that two GPUs are available" return accelerator def __a ( _UpperCamelCase: Accelerator , _UpperCamelCase: int , _UpperCamelCase: int , _UpperCamelCase: bool = False ) -> Dict: """simple docstring""" if iterable: _snake_case = DummyIterableDataset(torch.as_tensor(range(_UpperCamelCase ) ) ) else: _snake_case = TensorDataset(torch.as_tensor(range(_UpperCamelCase ) ) ) _snake_case = DataLoader(_UpperCamelCase , batch_size=_UpperCamelCase ) _snake_case = accelerator.prepare(_UpperCamelCase ) return dl def __a ( _UpperCamelCase: Accelerator , _UpperCamelCase: int , _UpperCamelCase: int , _UpperCamelCase: List[int] , _UpperCamelCase: List[int] , ) -> str: """simple docstring""" _snake_case = create_dataloader(accelerator=_UpperCamelCase , dataset_size=_UpperCamelCase , batch_size=_UpperCamelCase ) _snake_case = [len(batch[0] ) for batch in dl] if accelerator.process_index == 0: assert batch_sizes == process_0_expected_batch_sizes elif accelerator.process_index == 1: assert batch_sizes == process_1_expected_batch_sizes def __a ( ) -> Tuple: """simple docstring""" _snake_case = create_accelerator() # without padding, we would expect a different number of batches verify_dataloader_batch_sizes( _UpperCamelCase , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1, 1] , ) # without padding, we would expect the same number of batches, but different sizes verify_dataloader_batch_sizes( _UpperCamelCase , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 2] , ) def __a ( ) -> List[str]: """simple docstring""" _snake_case = create_accelerator(even_batches=_UpperCamelCase ) verify_dataloader_batch_sizes( _UpperCamelCase , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1] , ) verify_dataloader_batch_sizes( _UpperCamelCase , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 1] , ) def __a ( ) -> List[str]: """simple docstring""" _snake_case = create_accelerator(even_batches=_UpperCamelCase ) _snake_case = torch.nn.Linear(1 , 1 ) _snake_case = accelerator.prepare(_UpperCamelCase ) _snake_case = create_dataloader(_UpperCamelCase , dataset_size=3 , batch_size=1 ) _snake_case = [] with accelerator.join_uneven_inputs([ddp_model] ): for batch_idx, batch in enumerate(_UpperCamelCase ): _snake_case = ddp_model(batch[0].float() ) _snake_case = output.sum() loss.backward() batch_idxs.append(_UpperCamelCase ) accelerator.wait_for_everyone() if accelerator.process_index == 0: assert batch_idxs == [0, 1] elif accelerator.process_index == 1: assert batch_idxs == [0] def __a ( _UpperCamelCase: Union[str, Any] ) -> Any: """simple docstring""" with warnings.catch_warnings(record=_UpperCamelCase ) as w: with accelerator.join_uneven_inputs([Mock()] ): pass assert issubclass(w[-1].category , _UpperCamelCase ) assert "only supported for multi-GPU" in str(w[-1].message ) def __a ( ) -> Optional[int]: """simple docstring""" _snake_case = True _snake_case = False _snake_case = create_accelerator(even_batches=_UpperCamelCase ) _snake_case = torch.nn.Linear(1 , 1 ) _snake_case = accelerator.prepare(_UpperCamelCase ) _snake_case = create_dataloader(_UpperCamelCase , dataset_size=3 , batch_size=1 ) _snake_case = create_dataloader(_UpperCamelCase , dataset_size=3 , batch_size=1 ) with accelerator.join_uneven_inputs([ddp_model] , even_batches=_UpperCamelCase ): _snake_case = train_dl.batch_sampler.even_batches _snake_case = valid_dl.batch_sampler.even_batches assert train_dl_overridden_value == overridden_even_batches assert valid_dl_overridden_value == overridden_even_batches assert train_dl.batch_sampler.even_batches == default_even_batches assert valid_dl.batch_sampler.even_batches == default_even_batches def __a ( ) -> List[str]: """simple docstring""" _snake_case = True _snake_case = False _snake_case = create_accelerator(even_batches=_UpperCamelCase ) _snake_case = torch.nn.Linear(1 , 1 ) _snake_case = accelerator.prepare(_UpperCamelCase ) create_dataloader(_UpperCamelCase , dataset_size=3 , batch_size=1 , iterable=_UpperCamelCase ) _snake_case = create_dataloader(_UpperCamelCase , dataset_size=3 , batch_size=1 ) with warnings.catch_warnings(): warnings.filterwarnings("ignore" ) try: with accelerator.join_uneven_inputs([ddp_model] , even_batches=_UpperCamelCase ): _snake_case = batch_dl.batch_sampler.even_batches except AttributeError: # ensure attribute error is not raised when processing iterable dl raise AssertionError assert batch_dl_overridden_value == overridden_even_batches assert batch_dl.batch_sampler.even_batches == default_even_batches def __a ( ) -> Any: """simple docstring""" _snake_case = create_accelerator() _snake_case = torch.nn.Linear(1 , 1 ) _snake_case = accelerator.prepare(_UpperCamelCase ) create_dataloader(_UpperCamelCase , dataset_size=3 , batch_size=1 , iterable=_UpperCamelCase ) with warnings.catch_warnings(record=_UpperCamelCase ) as w: with accelerator.join_uneven_inputs([ddp_model] , even_batches=_UpperCamelCase ): pass assert issubclass(w[-1].category , _UpperCamelCase ) assert "only supported for map-style datasets" in str(w[-1].message ) def __a ( ) -> str: """simple docstring""" _snake_case = create_accelerator() accelerator.print("Test that even_batches variable ensures uniform batches across processes" ) test_default_ensures_even_batch_sizes() accelerator.print("Run tests with even_batches disabled" ) test_can_disable_even_batches() accelerator.print("Test joining uneven inputs" ) test_can_join_uneven_inputs() accelerator.print("Test overriding even_batches when joining uneven inputs" ) test_join_can_override_even_batches() accelerator.print("Test overriding even_batches for mixed dataloader types" ) test_join_can_override_for_mixed_type_dataloaders() accelerator.print("Test overriding even_batches raises a warning for iterable dataloaders" ) test_join_raises_warning_for_iterable_when_overriding_even_batches() accelerator.print("Test join with non DDP distributed raises warning" ) _snake_case = accelerator.state.distributed_type _snake_case = DistributedType.FSDP test_join_raises_warning_for_non_ddp_distributed(_UpperCamelCase ) _snake_case = original_state if __name__ == "__main__": main()
185
'''simple docstring''' from __future__ import annotations from typing import Any class _a : def __init__( self ,_SCREAMING_SNAKE_CASE ) -> None: _snake_case = num_of_nodes _snake_case = [] _snake_case = {} def _lowercase ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> None: self.m_edges.append([u_node, v_node, weight] ) def _lowercase ( self ,_SCREAMING_SNAKE_CASE ) -> int: if self.m_component[u_node] == u_node: return u_node return self.find_component(self.m_component[u_node] ) def _lowercase ( self ,_SCREAMING_SNAKE_CASE ) -> None: if self.m_component[u_node] != u_node: for k in self.m_component: _snake_case = self.find_component(_SCREAMING_SNAKE_CASE ) def _lowercase ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> None: if component_size[u_node] <= component_size[v_node]: _snake_case = v_node component_size[v_node] += component_size[u_node] self.set_component(_SCREAMING_SNAKE_CASE ) elif component_size[u_node] >= component_size[v_node]: _snake_case = self.find_component(_SCREAMING_SNAKE_CASE ) component_size[u_node] += component_size[v_node] self.set_component(_SCREAMING_SNAKE_CASE ) def _lowercase ( self ) -> None: _snake_case = [] _snake_case = 0 _snake_case = [-1] * self.m_num_of_nodes # A list of components (initialized to all of the nodes) for node in range(self.m_num_of_nodes ): self.m_component.update({node: node} ) component_size.append(1 ) _snake_case = self.m_num_of_nodes while num_of_components > 1: for edge in self.m_edges: _snake_case , _snake_case , _snake_case = edge _snake_case = self.m_component[u] _snake_case = self.m_component[v] if u_component != v_component: for component in (u_component, v_component): if ( minimum_weight_edge[component] == -1 or minimum_weight_edge[component][2] > w ): _snake_case = [u, v, w] for edge in minimum_weight_edge: if isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ): _snake_case , _snake_case , _snake_case = edge _snake_case = self.m_component[u] _snake_case = self.m_component[v] if u_component != v_component: mst_weight += w self.union(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) print(f"""Added edge [{u} - {v}]\nAdded weight: {w}\n""" ) num_of_components -= 1 _snake_case = [-1] * self.m_num_of_nodes print(f"""The total weight of the minimal spanning tree is: {mst_weight}""" ) def __a ( ) -> None: """simple docstring""" if __name__ == "__main__": import doctest doctest.testmod()
185
1
'''simple docstring''' import argparse import torch from torch import nn from transformers import MBartConfig, MBartForConditionalGeneration def _a ( _lowercase : Tuple ): '''simple docstring''' __UpperCAmelCase : Tuple = [ '''encoder.version''', '''decoder.version''', '''model.encoder.version''', '''model.decoder.version''', '''_float_tensor''', '''decoder.output_projection.weight''', ] for k in ignore_keys: state_dict.pop(lowercase__ , lowercase__ ) def _a ( _lowercase : Tuple ): '''simple docstring''' __UpperCAmelCase , __UpperCAmelCase : Optional[int] = emb.weight.shape __UpperCAmelCase : Tuple = nn.Linear(lowercase__ , lowercase__ , bias=lowercase__ ) __UpperCAmelCase : Dict = emb.weight.data return lin_layer def _a ( _lowercase : int , _lowercase : Tuple="facebook/mbart-large-en-ro" , _lowercase : int=False , _lowercase : Dict=False ): '''simple docstring''' __UpperCAmelCase : List[str] = torch.load(lowercase__ , map_location='''cpu''' )['''model'''] remove_ignore_keys_(lowercase__ ) __UpperCAmelCase : Optional[int] = state_dict['''encoder.embed_tokens.weight'''].shape[0] __UpperCAmelCase : Optional[int] = MBartConfig.from_pretrained(lowercase__ , vocab_size=lowercase__ ) if mbart_aa and finetuned: __UpperCAmelCase : Dict = '''relu''' __UpperCAmelCase : Union[str, Any] = state_dict['''decoder.embed_tokens.weight'''] __UpperCAmelCase : Optional[Any] = MBartForConditionalGeneration(lowercase__ ) model.model.load_state_dict(lowercase__ ) if finetuned: __UpperCAmelCase : Any = make_linear_from_emb(model.model.shared ) return model if __name__ == "__main__": __UpperCAmelCase :Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( "fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem." ) parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument( "--hf_config", default="facebook/mbart-large-cc25", type=str, help="Which huggingface architecture to use: mbart-large", ) parser.add_argument("--mbart_50", action="store_true", help="whether the model is mMART-50 checkpoint") parser.add_argument("--finetuned", action="store_true", help="whether the model is a fine-tuned checkpoint") __UpperCAmelCase :Optional[Any] = parser.parse_args() __UpperCAmelCase :str = convert_fairseq_mbart_checkpoint_from_disk( args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa ) model.save_pretrained(args.pytorch_dump_folder_path)
719
'''simple docstring''' def _a ( _lowercase : int = 4000000 ): '''simple docstring''' __UpperCAmelCase : Tuple = [] __UpperCAmelCase , __UpperCAmelCase : int = 0, 1 while b <= n: if b % 2 == 0: even_fibs.append(_lowercase ) __UpperCAmelCase , __UpperCAmelCase : Dict = b, a + b return sum(_lowercase ) if __name__ == "__main__": print(f"""{solution() = }""")
266
0
"""simple docstring""" import torch from transformers import AutoModel class _lowerCamelCase ( torch.nn.Module ): def __init__( self : Dict , UpperCamelCase : Union[str, Any]="sayef/fsner-bert-base-uncased" ) -> Any: """simple docstring""" super(UpperCamelCase , self ).__init__() lowerCAmelCase__ : List[str] = AutoModel.from_pretrained(UpperCamelCase , return_dict=UpperCamelCase ) lowerCAmelCase__ : Union[str, Any] = torch.nn.CosineSimilarity(3 , 1E-0_8 ) lowerCAmelCase__ : str = torch.nn.Softmax(dim=1 ) def _lowerCAmelCase ( self : str , **UpperCamelCase : Union[str, Any] ) -> List[str]: """simple docstring""" return self.bert(**UpperCamelCase ).last_hidden_state def _lowerCAmelCase ( self : Any , UpperCamelCase : Any ) -> Dict: """simple docstring""" return token_embeddings.sum(2 , keepdim=UpperCamelCase ) def _lowerCAmelCase ( self : List[Any] , UpperCamelCase : Any , UpperCamelCase : Optional[int] , UpperCamelCase : Union[str, Any]=1 ) -> Optional[int]: """simple docstring""" return self.softmax(T * self.cos(UpperCamelCase , UpperCamelCase ) ) def _lowerCAmelCase ( self : List[Any] , UpperCamelCase : int , UpperCamelCase : Optional[int] ) -> List[Any]: """simple docstring""" lowerCAmelCase__ : List[str] = W_supports["""sizes"""].tolist() lowerCAmelCase__ : Union[str, Any] = W_supports["""start_token_id"""].item() lowerCAmelCase__ : Optional[int] = W_supports["""end_token_id"""].item() del W_supports["sizes"] del W_supports["start_token_id"] del W_supports["end_token_id"] lowerCAmelCase__ : List[Any] = self.BERT(**UpperCamelCase ) lowerCAmelCase__ : List[str] = self.BERT(**UpperCamelCase ) lowerCAmelCase__ : List[Any] = None lowerCAmelCase__ : str = None lowerCAmelCase__ : List[str] = W_supports["""input_ids"""] == start_token_id lowerCAmelCase__ : Optional[int] = W_supports["""input_ids"""] == end_token_id for i, size in enumerate(UpperCamelCase ): if i == 0: lowerCAmelCase__ : Tuple = 0 else: lowerCAmelCase__ : Optional[Any] = support_sizes[i - 1] lowerCAmelCase__ : Union[str, Any] = S[s : s + size][start_token_masks[s : s + size]] lowerCAmelCase__ : Union[str, Any] = S[s : s + size][end_token_masks[s : s + size]] lowerCAmelCase__ : List[str] = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 ) lowerCAmelCase__ : List[str] = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 ) if p_starts is not None: lowerCAmelCase__ : Optional[Any] = torch.vstack((p_starts, p_start) ) lowerCAmelCase__ : List[str] = torch.vstack((p_ends, p_end) ) else: lowerCAmelCase__ : Union[str, Any] = p_start lowerCAmelCase__ : Optional[int] = p_end return p_starts, p_ends
299
"""simple docstring""" import importlib import os import fsspec import pytest from fsspec import register_implementation from fsspec.registry import _registry as _fsspec_registry from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem from .utils import require_lza, require_zstandard def lowercase_ ( __UpperCAmelCase ) -> Dict: assert "mock" in _fsspec_registry assert "bz2" in _fsspec_registry def lowercase_ ( ) -> Any: assert "mock" not in _fsspec_registry assert "bz2" in _fsspec_registry def lowercase_ ( ) -> Union[str, Any]: lowerCAmelCase__ : Union[str, Any] = """mock-s3-bucket""" lowerCAmelCase__ : List[str] = f"""s3://{mock_bucket}""" lowerCAmelCase__ : Dict = extract_path_from_uri(__UpperCAmelCase ) assert dataset_path.startswith("""s3://""" ) is False lowerCAmelCase__ : List[str] = """./local/path""" lowerCAmelCase__ : List[Any] = extract_path_from_uri(__UpperCAmelCase ) assert dataset_path == new_dataset_path def lowercase_ ( __UpperCAmelCase ) -> List[str]: lowerCAmelCase__ : List[Any] = is_remote_filesystem(__UpperCAmelCase ) assert is_remote is True lowerCAmelCase__ : Any = fsspec.filesystem("""file""" ) lowerCAmelCase__ : int = is_remote_filesystem(__UpperCAmelCase ) assert is_remote is False @pytest.mark.parametrize("""compression_fs_class""" , __UpperCAmelCase ) def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Any: lowerCAmelCase__ : Dict = {"""gzip""": gz_file, """xz""": xz_file, """zstd""": zstd_file, """bz2""": bza_file, """lz4""": lza_file} lowerCAmelCase__ : Dict = input_paths[compression_fs_class.protocol] if input_path is None: lowerCAmelCase__ : Any = f"""for '{compression_fs_class.protocol}' compression protocol, """ if compression_fs_class.protocol == "lz4": reason += require_lza.kwargs["reason"] elif compression_fs_class.protocol == "zstd": reason += require_zstandard.kwargs["reason"] pytest.skip(__UpperCAmelCase ) lowerCAmelCase__ : Dict = fsspec.filesystem(compression_fs_class.protocol , fo=__UpperCAmelCase ) assert isinstance(__UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ : Dict = os.path.basename(__UpperCAmelCase ) lowerCAmelCase__ : Dict = expected_filename[: expected_filename.rindex(""".""" )] assert fs.glob("""*""" ) == [expected_filename] with fs.open(__UpperCAmelCase , """r""" , encoding="""utf-8""" ) as f, open(__UpperCAmelCase , encoding="""utf-8""" ) as expected_file: assert f.read() == expected_file.read() @pytest.mark.parametrize("""protocol""" , ["""zip""", """gzip"""] ) def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> int: lowerCAmelCase__ : List[Any] = {"""zip""": zip_jsonl_path, """gzip""": jsonl_gz_path} lowerCAmelCase__ : List[Any] = compressed_file_paths[protocol] lowerCAmelCase__ : Optional[Any] = """dataset.jsonl""" lowerCAmelCase__ : Optional[Any] = f"""{protocol}://{member_file_path}::{compressed_file_path}""" lowerCAmelCase__ , *lowerCAmelCase__ : int = fsspec.get_fs_token_paths(__UpperCAmelCase ) assert fs.isfile(__UpperCAmelCase ) assert not fs.isfile("""non_existing_""" + member_file_path ) @pytest.mark.integration def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Tuple: lowerCAmelCase__ : Tuple = hf_api.dataset_info(__UpperCAmelCase , token=__UpperCAmelCase ) lowerCAmelCase__ : Tuple = HfFileSystem(repo_info=__UpperCAmelCase , token=__UpperCAmelCase ) assert sorted(hffs.glob("""*""" ) ) == [".gitattributes", "data"] assert hffs.isdir("""data""" ) assert hffs.isfile(""".gitattributes""" ) and hffs.isfile("""data/text_data.txt""" ) with open(__UpperCAmelCase ) as f: assert hffs.open("""data/text_data.txt""" , """r""" ).read() == f.read() def lowercase_ ( ) -> Optional[int]: lowerCAmelCase__ : int = """bz2""" # Import module import datasets.filesystems # Overwrite protocol and reload register_implementation(__UpperCAmelCase , __UpperCAmelCase , clobber=__UpperCAmelCase ) with pytest.warns(__UpperCAmelCase ) as warning_info: importlib.reload(datasets.filesystems ) assert len(__UpperCAmelCase ) == 1 assert ( str(warning_info[0].message ) == f"""A filesystem protocol was already set for {protocol} and will be overwritten.""" )
299
1
'''simple docstring''' import argparse import json import os import torch from transformers.file_utils import has_file from diffusers import UNetaDConditionModel, UNetaDModel _lowerCAmelCase = False _lowerCAmelCase = True _lowerCAmelCase = False if __name__ == "__main__": _lowerCAmelCase = argparse.ArgumentParser() parser.add_argument( '''--repo_path''', default=None, type=str, required=True, help='''The config json file corresponding to the architecture.''', ) parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''') _lowerCAmelCase = parser.parse_args() _lowerCAmelCase = { '''image_size''': '''sample_size''', '''num_res_blocks''': '''layers_per_block''', '''block_channels''': '''block_out_channels''', '''down_blocks''': '''down_block_types''', '''up_blocks''': '''up_block_types''', '''downscale_freq_shift''': '''freq_shift''', '''resnet_num_groups''': '''norm_num_groups''', '''resnet_act_fn''': '''act_fn''', '''resnet_eps''': '''norm_eps''', '''num_head_channels''': '''attention_head_dim''', } _lowerCAmelCase = { '''time_steps''': '''time_proj''', '''mid''': '''mid_block''', '''downsample_blocks''': '''down_blocks''', '''upsample_blocks''': '''up_blocks''', } _lowerCAmelCase = '''''' if has_file(args.repo_path, '''config.json''') else '''unet''' with open(os.path.join(args.repo_path, subfolder, '''config.json'''), '''r''', encoding='''utf-8''') as reader: _lowerCAmelCase = reader.read() _lowerCAmelCase = json.loads(text) if do_only_config: for key in config_parameters_to_change.keys(): config.pop(key, None) if has_file(args.repo_path, '''config.json'''): _lowerCAmelCase = UNetaDModel(**config) else: _lowerCAmelCase = UNetaDConditionModel if '''ldm-text2im-large-256''' in args.repo_path else UNetaDModel _lowerCAmelCase = class_name(**config) if do_only_config: model.save_config(os.path.join(args.repo_path, subfolder)) _lowerCAmelCase = dict(model.config) if do_only_renaming: for key, value in config_parameters_to_change.items(): if key in config: _lowerCAmelCase = config[key] del config[key] _lowerCAmelCase = [k.replace('''UNetRes''', '''''') for k in config['''down_block_types''']] _lowerCAmelCase = [k.replace('''UNetRes''', '''''') for k in config['''up_block_types''']] if do_only_weights: _lowerCAmelCase = torch.load(os.path.join(args.repo_path, subfolder, '''diffusion_pytorch_model.bin''')) _lowerCAmelCase = {} for param_key, param_value in state_dict.items(): if param_key.endswith('''.op.bias''') or param_key.endswith('''.op.weight'''): continue _lowerCAmelCase = False for key, new_key in key_parameters_to_change.items(): if not has_changed and param_key.split('''.''')[0] == key: _lowerCAmelCase = param_value _lowerCAmelCase = True if not has_changed: _lowerCAmelCase = param_value model.load_state_dict(new_state_dict) model.save_pretrained(os.path.join(args.repo_path, subfolder))
160
'''simple docstring''' import os from pathlib import Path def _SCREAMING_SNAKE_CASE ( ): """simple docstring""" from torch.utils.cpp_extension import load lowerCAmelCase__ : Dict = Path(UpperCamelCase ).resolve().parent.parent.parent / """kernels""" / """deformable_detr""" lowerCAmelCase__ : Any = [ root / filename for filename in [ """vision.cpp""", os.path.join("""cpu""" , """ms_deform_attn_cpu.cpp""" ), os.path.join("""cuda""" , """ms_deform_attn_cuda.cu""" ), ] ] load( """MultiScaleDeformableAttention""" , UpperCamelCase , with_cuda=UpperCamelCase , extra_include_paths=[str(UpperCamelCase )] , extra_cflags=["""-DWITH_CUDA=1"""] , extra_cuda_cflags=[ """-DCUDA_HAS_FP16=1""", """-D__CUDA_NO_HALF_OPERATORS__""", """-D__CUDA_NO_HALF_CONVERSIONS__""", """-D__CUDA_NO_HALF2_OPERATORS__""", ] , ) import MultiScaleDeformableAttention as MSDA return MSDA
160
1
from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow if is_tf_available(): import numpy as np import tensorflow as tf from transformers import TFCamembertModel @require_tf @require_sentencepiece @require_tokenizers class _snake_case ( unittest.TestCase ): @slow def lowercase__ ( self): '''simple docstring''' lowercase__ : Dict = TFCamembertModel.from_pretrained("""jplu/tf-camembert-base""") lowercase__ : Optional[int] = tf.convert_to_tensor( [[5, 1_21, 11, 6_60, 16, 7_30, 2_55_43, 1_10, 83, 6]] , dtype=tf.intaa , ) # J'aime le camembert !" lowercase__ : Optional[Any] = model(SCREAMING_SNAKE_CASE_)["""last_hidden_state"""] lowercase__ : Tuple = tf.TensorShape((1, 10, 7_68)) self.assertEqual(output.shape , SCREAMING_SNAKE_CASE_) # compare the actual values for a slice. lowercase__ : Optional[Any] = tf.convert_to_tensor( [[[-0.0_2_5_4, 0.0_2_3_5, 0.1_0_2_7], [0.0_6_0_6, -0.1_8_1_1, -0.0_4_1_8], [-0.1_5_6_1, -0.1_1_2_7, 0.2_6_8_7]]] , dtype=tf.floataa , ) # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0') # camembert.eval() # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach() self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4))
12
'''simple docstring''' import warnings from diffusers import StableDiffusionImgaImgPipeline # noqa F401 warnings.warn( '''The `image_to_image.py` script is outdated. Please use directly `from diffusers import''' ''' StableDiffusionImg2ImgPipeline` instead.''' )
502
0
def _A ( _UpperCamelCase = 10 , _UpperCamelCase = 22 ): _UpperCAmelCase : List[str] = range(1 , snake_case_ ) _UpperCAmelCase : str = range(1 , snake_case_ ) return sum( 1 for power in powers for base in bases if len(str(base**power ) ) == power ) if __name__ == "__main__": print(F"""{solution(10, 22) = }""")
710
from typing import Any, Dict, List, Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from ..image_utils import load_image if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING UpperCAmelCase__ : Union[str, Any] = logging.get_logger(__name__) UpperCAmelCase__ : Any = Dict[str, Any] UpperCAmelCase__ : List[str] = List[Prediction] @add_end_docstrings(lowercase_ ) class lowerCAmelCase_ ( lowercase_ ): def __init__( self : List[Any] , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : List[Any] ) -> Any: '''simple docstring''' super().__init__(*UpperCAmelCase_ , **UpperCAmelCase_ ) if self.framework == "tf": raise ValueError(F'''The {self.__class__} is only available in PyTorch.''' ) requires_backends(self , '''vision''' ) self.check_model_type( dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items() ) ) def a_ ( self : str , **UpperCAmelCase_ : int ) -> Optional[Any]: '''simple docstring''' _UpperCAmelCase : List[str] = {} if "threshold" in kwargs: _UpperCAmelCase : List[str] = kwargs['''threshold'''] return {}, {}, postprocess_kwargs def __call__( self : Optional[Any] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : List[Any] ) -> Union[Predictions, List[Prediction]]: '''simple docstring''' return super().__call__(*UpperCAmelCase_ , **UpperCAmelCase_ ) def a_ ( self : Optional[Any] , UpperCAmelCase_ : List[Any] ) -> Any: '''simple docstring''' _UpperCAmelCase : Dict = load_image(UpperCAmelCase_ ) _UpperCAmelCase : Dict = torch.IntTensor([[image.height, image.width]] ) _UpperCAmelCase : Dict = self.image_processor(images=[image] , return_tensors='''pt''' ) if self.tokenizer is not None: _UpperCAmelCase : Optional[Any] = self.tokenizer(text=inputs['''words'''] , boxes=inputs['''boxes'''] , return_tensors='''pt''' ) _UpperCAmelCase : Tuple = target_size return inputs def a_ ( self : List[str] , UpperCAmelCase_ : Optional[int] ) -> Optional[int]: '''simple docstring''' _UpperCAmelCase : Dict = model_inputs.pop('''target_size''' ) _UpperCAmelCase : int = self.model(**UpperCAmelCase_ ) _UpperCAmelCase : int = outputs.__class__({'''target_size''': target_size, **outputs} ) if self.tokenizer is not None: _UpperCAmelCase : Tuple = model_inputs['''bbox'''] return model_outputs def a_ ( self : Dict , UpperCAmelCase_ : Dict , UpperCAmelCase_ : int=0.9 ) -> Union[str, Any]: '''simple docstring''' _UpperCAmelCase : Optional[Any] = model_outputs['''target_size'''] if self.tokenizer is not None: # This is a LayoutLMForTokenClassification variant. # The OCR got the boxes and the model classified the words. _UpperCAmelCase , _UpperCAmelCase : Tuple = target_size[0].tolist() def unnormalize(UpperCAmelCase_ : List[Any] ): return self._get_bounding_box( torch.Tensor( [ (width * bbox[0] / 1000), (height * bbox[1] / 1000), (width * bbox[2] / 1000), (height * bbox[3] / 1000), ] ) ) _UpperCAmelCase , _UpperCAmelCase : Optional[Any] = model_outputs['''logits'''].squeeze(0 ).softmax(dim=-1 ).max(dim=-1 ) _UpperCAmelCase : List[Any] = [self.model.config.idalabel[prediction] for prediction in classes.tolist()] _UpperCAmelCase : List[Any] = [unnormalize(UpperCAmelCase_ ) for bbox in model_outputs['''bbox'''].squeeze(0 )] _UpperCAmelCase : Union[str, Any] = ['''score''', '''label''', '''box'''] _UpperCAmelCase : Optional[Any] = [dict(zip(UpperCAmelCase_ , UpperCAmelCase_ ) ) for vals in zip(scores.tolist() , UpperCAmelCase_ , UpperCAmelCase_ ) if vals[0] > threshold] else: # This is a regular ForObjectDetectionModel _UpperCAmelCase : Optional[int] = self.image_processor.post_process_object_detection(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) _UpperCAmelCase : Any = raw_annotations[0] _UpperCAmelCase : List[str] = raw_annotation['''scores'''] _UpperCAmelCase : str = raw_annotation['''labels'''] _UpperCAmelCase : Dict = raw_annotation['''boxes'''] _UpperCAmelCase : List[str] = scores.tolist() _UpperCAmelCase : int = [self.model.config.idalabel[label.item()] for label in labels] _UpperCAmelCase : Any = [self._get_bounding_box(UpperCAmelCase_ ) for box in boxes] # {"scores": [...], ...} --> [{"score":x, ...}, ...] _UpperCAmelCase : Tuple = ['''score''', '''label''', '''box'''] _UpperCAmelCase : Any = [ dict(zip(UpperCAmelCase_ , UpperCAmelCase_ ) ) for vals in zip(raw_annotation['''scores'''] , raw_annotation['''labels'''] , raw_annotation['''boxes'''] ) ] return annotation def a_ ( self : Optional[int] , UpperCAmelCase_ : "torch.Tensor" ) -> Dict[str, int]: '''simple docstring''' if self.framework != "pt": raise ValueError('''The ObjectDetectionPipeline is only available in PyTorch.''' ) _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = box.int().tolist() _UpperCAmelCase : Optional[Any] = { '''xmin''': xmin, '''ymin''': ymin, '''xmax''': xmax, '''ymax''': ymax, } return bbox
416
0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available SCREAMING_SNAKE_CASE__ : Optional[int] = { 'configuration_biogpt': ['BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BioGptConfig'], 'tokenization_biogpt': ['BioGptTokenizer'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ : int = [ 'BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST', 'BioGptForCausalLM', 'BioGptForTokenClassification', 'BioGptForSequenceClassification', 'BioGptModel', 'BioGptPreTrainedModel', ] if TYPE_CHECKING: from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig from .tokenization_biogpt import BioGptTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_biogpt import ( BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification, BioGptModel, BioGptPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE__ : Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
79
from typing import List, Optional import numpy as np from ...processing_utils import ProcessorMixin from ...utils import to_numpy class _a ( A__ ): """simple docstring""" snake_case ="""EncodecFeatureExtractor""" snake_case =("""T5Tokenizer""", """T5TokenizerFast""") def __init__( self , _snake_case , _snake_case ): super().__init__(_snake_case , _snake_case ) _UpperCAmelCase =self.feature_extractor _UpperCAmelCase =False def SCREAMING_SNAKE_CASE ( self , _snake_case=None , _snake_case=None , _snake_case=True ): return self.tokenizer.get_decoder_prompt_ids(task=_snake_case , language=_snake_case , no_timestamps=_snake_case ) def __call__( self , *_snake_case , **_snake_case ): # For backward compatibility if self._in_target_context_manager: return self.current_processor(*_snake_case , **_snake_case ) _UpperCAmelCase =kwargs.pop("audio" , _snake_case ) _UpperCAmelCase =kwargs.pop("sampling_rate" , _snake_case ) _UpperCAmelCase =kwargs.pop("text" , _snake_case ) if len(_snake_case ) > 0: _UpperCAmelCase =args[0] _UpperCAmelCase =args[1:] if audio is None and text is None: raise ValueError("You need to specify either an `audio` or `text` input to process." ) if text is not None: _UpperCAmelCase =self.tokenizer(_snake_case , **_snake_case ) if audio is not None: _UpperCAmelCase =self.feature_extractor(_snake_case , *_snake_case , sampling_rate=_snake_case , **_snake_case ) if audio is None: return inputs elif text is None: return audio_inputs else: _UpperCAmelCase =audio_inputs["input_values"] if "padding_mask" in audio_inputs: _UpperCAmelCase =audio_inputs["padding_mask"] return inputs def SCREAMING_SNAKE_CASE ( self , *_snake_case , **_snake_case ): _UpperCAmelCase =kwargs.pop("audio" , _snake_case ) _UpperCAmelCase =kwargs.pop("padding_mask" , _snake_case ) if len(_snake_case ) > 0: _UpperCAmelCase =args[0] _UpperCAmelCase =args[1:] if audio_values is not None: return self._decode_audio(_snake_case , padding_mask=_snake_case ) else: return self.tokenizer.batch_decode(*_snake_case , **_snake_case ) def SCREAMING_SNAKE_CASE ( self , *_snake_case , **_snake_case ): return self.tokenizer.decode(*_snake_case , **_snake_case ) def SCREAMING_SNAKE_CASE ( self , _snake_case , _snake_case = None ): _UpperCAmelCase =to_numpy(_snake_case ) _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase =audio_values.shape if padding_mask is None: return list(_snake_case ) _UpperCAmelCase =to_numpy(_snake_case ) # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding** # token (so that the generated audio values are **not** treated as padded tokens) _UpperCAmelCase =seq_len - padding_mask.shape[-1] _UpperCAmelCase =1 - self.feature_extractor.padding_value _UpperCAmelCase =np.pad(_snake_case , ((0, 0), (0, difference)) , "constant" , constant_values=_snake_case ) _UpperCAmelCase =audio_values.tolist() for i in range(_snake_case ): _UpperCAmelCase =np.asarray(audio_values[i] )[ padding_mask[i][None, :] != self.feature_extractor.padding_value ] _UpperCAmelCase =sliced_audio.reshape(_snake_case , -1 ) return audio_values
408
0
import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from torchvision import transforms from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling def SCREAMING_SNAKE_CASE_ ( UpperCamelCase__ ): UpperCamelCase__ : Tuple = [2, 2, 6, 2] if '''tiny''' in model_name else [2, 2, 1_8, 2] UpperCamelCase__ : Optional[Any] = True if '''large''' in model_name or '''huge''' in model_name else False UpperCamelCase__ : Optional[int] = True if '''large''' in model_name or '''huge''' in model_name else False UpperCamelCase__ : Optional[int] = True if '''large''' in model_name or '''huge''' in model_name else False if "large" in model_name or "xlarge" in model_name or "huge" in model_name: if "fl3" in model_name: UpperCamelCase__ : Dict = [3, 3, 3, 3] UpperCamelCase__ : Union[str, Any] = [5, 5, 5, 5] elif "fl4" in model_name: UpperCamelCase__ : Union[str, Any] = [4, 4, 4, 4] UpperCamelCase__ : Optional[int] = [3, 3, 3, 3] if "tiny" in model_name or "small" in model_name or "base" in model_name: UpperCamelCase__ : Tuple = [3, 3, 3, 3] if "lrf" in model_name: UpperCamelCase__ : List[str] = [3, 3, 3, 3] else: UpperCamelCase__ : Optional[int] = [2, 2, 2, 2] if "tiny" in model_name: UpperCamelCase__ : int = 9_6 elif "small" in model_name: UpperCamelCase__ : Optional[Any] = 9_6 elif "base" in model_name: UpperCamelCase__ : Tuple = 1_2_8 elif "large" in model_name: UpperCamelCase__ : List[str] = 1_9_2 elif "xlarge" in model_name: UpperCamelCase__ : Optional[int] = 2_5_6 elif "huge" in model_name: UpperCamelCase__ : Any = 3_5_2 # set label information UpperCamelCase__ : str = '''huggingface/label-files''' if "large" in model_name or "huge" in model_name: UpperCamelCase__ : str = '''imagenet-22k-id2label.json''' else: UpperCamelCase__ : Optional[Any] = '''imagenet-1k-id2label.json''' UpperCamelCase__ : Optional[Any] = json.load(open(hf_hub_download(UpperCamelCase__ , UpperCamelCase__ , repo_type='''dataset''' ) , '''r''' ) ) UpperCamelCase__ : List[Any] = {int(UpperCamelCase__ ): v for k, v in idalabel.items()} UpperCamelCase__ : Union[str, Any] = {v: k for k, v in idalabel.items()} UpperCamelCase__ : Dict = FocalNetConfig( embed_dim=UpperCamelCase__ , depths=UpperCamelCase__ , focal_levels=UpperCamelCase__ , focal_windows=UpperCamelCase__ , use_conv_embed=UpperCamelCase__ , idalabel=UpperCamelCase__ , labelaid=UpperCamelCase__ , use_post_layernorm=UpperCamelCase__ , use_layerscale=UpperCamelCase__ , ) return config def SCREAMING_SNAKE_CASE_ ( UpperCamelCase__ ): if "patch_embed.proj" in name: UpperCamelCase__ : List[str] = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' ) if "patch_embed.norm" in name: UpperCamelCase__ : Union[str, Any] = name.replace('''patch_embed.norm''' , '''embeddings.norm''' ) if "layers" in name: UpperCamelCase__ : int = '''encoder.''' + name if "encoder.layers" in name: UpperCamelCase__ : Union[str, Any] = name.replace('''encoder.layers''' , '''encoder.stages''' ) if "downsample.proj" in name: UpperCamelCase__ : List[Any] = name.replace('''downsample.proj''' , '''downsample.projection''' ) if "blocks" in name: UpperCamelCase__ : List[Any] = name.replace('''blocks''' , '''layers''' ) if "modulation.f.weight" in name or "modulation.f.bias" in name: UpperCamelCase__ : Any = name.replace('''modulation.f''' , '''modulation.projection_in''' ) if "modulation.h.weight" in name or "modulation.h.bias" in name: UpperCamelCase__ : List[Any] = name.replace('''modulation.h''' , '''modulation.projection_context''' ) if "modulation.proj.weight" in name or "modulation.proj.bias" in name: UpperCamelCase__ : Any = name.replace('''modulation.proj''' , '''modulation.projection_out''' ) if name == "norm.weight": UpperCamelCase__ : Dict = '''layernorm.weight''' if name == "norm.bias": UpperCamelCase__ : List[Any] = '''layernorm.bias''' if "head" in name: UpperCamelCase__ : Any = name.replace('''head''' , '''classifier''' ) else: UpperCamelCase__ : List[Any] = '''focalnet.''' + name return name def SCREAMING_SNAKE_CASE_ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=False ): # fmt: off UpperCamelCase__ : Dict = { '''focalnet-tiny''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth''', '''focalnet-tiny-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth''', '''focalnet-small''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth''', '''focalnet-small-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth''', '''focalnet-base''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth''', '''focalnet-base-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth''', '''focalnet-large-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth''', '''focalnet-large-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth''', '''focalnet-xlarge-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth''', '''focalnet-xlarge-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth''', } # fmt: on UpperCamelCase__ : Tuple = model_name_to_url[model_name] print('''Checkpoint URL: ''' , UpperCamelCase__ ) UpperCamelCase__ : str = torch.hub.load_state_dict_from_url(UpperCamelCase__ , map_location='''cpu''' )['''model'''] # rename keys for key in state_dict.copy().keys(): UpperCamelCase__ : List[Any] = state_dict.pop(UpperCamelCase__ ) UpperCamelCase__ : Dict = val UpperCamelCase__ : Tuple = get_focalnet_config(UpperCamelCase__ ) UpperCamelCase__ : List[str] = FocalNetForImageClassification(UpperCamelCase__ ) model.eval() # load state dict model.load_state_dict(UpperCamelCase__ ) # verify conversion UpperCamelCase__ : Optional[int] = '''http://images.cocodataset.org/val2017/000000039769.jpg''' UpperCamelCase__ : List[Any] = BitImageProcessor( do_resize=UpperCamelCase__ , size={'''shortest_edge''': 2_5_6} , resample=PILImageResampling.BILINEAR , do_center_crop=UpperCamelCase__ , crop_size=2_2_4 , do_normalize=UpperCamelCase__ , image_mean=UpperCamelCase__ , image_std=UpperCamelCase__ , ) UpperCamelCase__ : str = Image.open(requests.get(UpperCamelCase__ , stream=UpperCamelCase__ ).raw ) UpperCamelCase__ : List[Any] = processor(images=UpperCamelCase__ , return_tensors='''pt''' ) UpperCamelCase__ : Tuple = transforms.Compose( [ transforms.Resize(2_5_6 ), transforms.CenterCrop(2_2_4 ), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ), ] ) UpperCamelCase__ : Union[str, Any] = image_transforms(UpperCamelCase__ ).unsqueeze(0 ) # verify pixel_values assert torch.allclose(inputs.pixel_values , UpperCamelCase__ , atol=1e-4 ) UpperCamelCase__ : List[str] = model(**UpperCamelCase__ ) UpperCamelCase__ : Any = outputs.logits.argmax(-1 ).item() print('''Predicted class:''' , model.config.idalabel[predicted_class_idx] ) print('''First values of logits:''' , outputs.logits[0, :3] ) if model_name == "focalnet-tiny": UpperCamelCase__ : str = torch.tensor([0.2166, -0.4368, 0.2191] ) elif model_name == "focalnet-tiny-lrf": UpperCamelCase__ : int = torch.tensor([1.1669, 0.0125, -0.1695] ) elif model_name == "focalnet-small": UpperCamelCase__ : Dict = torch.tensor([0.4917, -0.0430, 0.1341] ) elif model_name == "focalnet-small-lrf": UpperCamelCase__ : Tuple = torch.tensor([-0.2588, -0.5342, -0.2331] ) elif model_name == "focalnet-base": UpperCamelCase__ : Optional[Any] = torch.tensor([-0.1655, -0.4090, -0.1730] ) elif model_name == "focalnet-base-lrf": UpperCamelCase__ : str = torch.tensor([0.5306, -0.0483, -0.3928] ) assert torch.allclose(outputs.logits[0, :3] , UpperCamelCase__ , atol=1e-4 ) print('''Looks ok!''' ) if pytorch_dump_folder_path is not None: print(f'''Saving model and processor of {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(UpperCamelCase__ ) processor.save_pretrained(UpperCamelCase__ ) if push_to_hub: print(f'''Pushing model and processor of {model_name} to the hub...''' ) model.push_to_hub(f'''{model_name}''' ) processor.push_to_hub(f'''{model_name}''' ) if __name__ == "__main__": lowerCamelCase =argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_name", default="focalnet-tiny", type=str, help="Name of the FocalNet model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether to push the model and processor to the hub.", ) lowerCamelCase =parser.parse_args() convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
707
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available lowerCamelCase ={"configuration_swin": ["SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP", "SwinConfig", "SwinOnnxConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase =[ "SWIN_PRETRAINED_MODEL_ARCHIVE_LIST", "SwinForImageClassification", "SwinForMaskedImageModeling", "SwinModel", "SwinPreTrainedModel", "SwinBackbone", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase =[ "TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST", "TFSwinForImageClassification", "TFSwinForMaskedImageModeling", "TFSwinModel", "TFSwinPreTrainedModel", ] if TYPE_CHECKING: from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_swin import ( SWIN_PRETRAINED_MODEL_ARCHIVE_LIST, SwinBackbone, SwinForImageClassification, SwinForMaskedImageModeling, SwinModel, SwinPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_swin import ( TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST, TFSwinForImageClassification, TFSwinForMaskedImageModeling, TFSwinModel, TFSwinPreTrainedModel, ) else: import sys lowerCamelCase =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
462
0
class snake_case__ : # Public class to implement a graph def __init__( self : Tuple , _lowerCamelCase : Tuple , _lowerCamelCase : List[str] , _lowerCamelCase : Any ): snake_case__ : Tuple = row snake_case__ : Optional[int] = col snake_case__ : Optional[Any] = graph def UpperCAmelCase__ ( self : Union[str, Any] , _lowerCamelCase : Tuple , _lowerCamelCase : str , _lowerCamelCase : int ): return ( 0 <= i < self.ROW and 0 <= j < self.COL and not visited[i][j] and self.graph[i][j] ) def UpperCAmelCase__ ( self : List[Any] , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Optional[int] , _lowerCamelCase : int ): # Checking all 8 elements surrounding nth element snake_case__ : List[str] = [-1, -1, -1, 0, 0, 1, 1, 1] # Coordinate order snake_case__ : Dict = [-1, 0, 1, -1, 1, -1, 0, 1] snake_case__ : List[str] = True # Make those cells visited for k in range(8 ): if self.is_safe(i + row_nbr[k] , j + col_nbr[k] , a_ ): self.diffs(i + row_nbr[k] , j + col_nbr[k] , a_ ) def UpperCAmelCase__ ( self : Any ): # And finally, count all islands. snake_case__ : Union[str, Any] = [[False for j in range(self.COL )] for i in range(self.ROW )] snake_case__ : Optional[Any] = 0 for i in range(self.ROW ): for j in range(self.COL ): if visited[i][j] is False and self.graph[i][j] == 1: self.diffs(a_ , a_ , a_ ) count += 1 return count
170
"""simple docstring""" def __lowerCamelCase ( UpperCamelCase__ ): """simple docstring""" if not isinstance(UpperCamelCase__ , UpperCamelCase__ ): _UpperCAmelCase = f"Input value of [number={number}] must be an integer" raise TypeError(UpperCamelCase__ ) if number < 0: return False _UpperCAmelCase = number * number while number > 0: if number % 10 != number_square % 10: return False number //= 10 number_square //= 10 return True if __name__ == "__main__": import doctest doctest.testmod()
657
0
'''simple docstring''' import argparse import gc import json import os import re import torch from huggingface_hub import hf_hub_download from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint lowercase = { '''169M''': 12, '''430M''': 24, '''1B5''': 24, '''3B''': 32, '''7B''': 32, '''14B''': 40, } lowercase = { '''169M''': 768, '''430M''': 1_024, '''1B5''': 2_048, '''3B''': 2_560, '''7B''': 4_096, '''14B''': 5_120, } def UpperCAmelCase_ ( lowercase__ ): '''simple docstring''' a_ =list(state_dict.keys() ) for name in state_dict_keys: a_ =state_dict.pop(lowercase__ ) # emb -> embedding if name.startswith("emb." ): a_ =name.replace("emb." , "embeddings." ) # ln_0 -> pre_ln (only present at block 0) if name.startswith("blocks.0.ln0" ): a_ =name.replace("blocks.0.ln0" , "blocks.0.pre_ln" ) # att -> attention a_ =re.sub(r"blocks\.(\d+)\.att" , r"blocks.\1.attention" , lowercase__ ) # ffn -> feed_forward a_ =re.sub(r"blocks\.(\d+)\.ffn" , r"blocks.\1.feed_forward" , lowercase__ ) # time_mix_k -> time_mix_key and reshape if name.endswith(".time_mix_k" ): a_ =name.replace(".time_mix_k" , ".time_mix_key" ) # time_mix_v -> time_mix_value and reshape if name.endswith(".time_mix_v" ): a_ =name.replace(".time_mix_v" , ".time_mix_value" ) # time_mix_r -> time_mix_key and reshape if name.endswith(".time_mix_r" ): a_ =name.replace(".time_mix_r" , ".time_mix_receptance" ) if name != "head.weight": a_ ="rwkv." + name a_ =weight return state_dict def UpperCAmelCase_ ( lowercase__ , lowercase__ , lowercase__ , lowercase__=None , lowercase__=None , lowercase__=False , lowercase__=None ): '''simple docstring''' if tokenizer_file is None: print("No `--tokenizer_file` provided, we will use the default tokenizer." ) a_ =5_0_2_7_7 a_ =AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b" ) else: a_ =PreTrainedTokenizerFast(tokenizer_file=lowercase__ ) a_ =len(lowercase__ ) tokenizer.save_pretrained(lowercase__ ) # 2. Build the config a_ =list(NUM_HIDDEN_LAYERS_MAPPING.keys() ) if size is None: # Try to infer size from the checkpoint name for candidate in possible_sizes: if candidate in checkpoint_file: a_ =candidate break if size is None: raise ValueError("Could not infer the size, please provide it with the `--size` argument." ) if size not in possible_sizes: raise ValueError(F"""`size` should be one of {possible_sizes}, got {size}.""" ) a_ =RwkvConfig( vocab_size=lowercase__ , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDEN_SIZE_MAPPING[size] , ) config.save_pretrained(lowercase__ ) # 3. Download model file then convert state_dict a_ =hf_hub_download(lowercase__ , lowercase__ ) a_ =torch.load(lowercase__ , map_location="cpu" ) a_ =convert_state_dict(lowercase__ ) # 4. Split in shards and save a_ , a_ =shard_checkpoint(lowercase__ ) for shard_file, shard in shards.items(): torch.save(lowercase__ , os.path.join(lowercase__ , lowercase__ ) ) if index is not None: a_ =os.path.join(lowercase__ , lowercase__ ) # Save the index as well with open(lowercase__ , "w" , encoding="utf-8" ) as f: a_ =json.dumps(lowercase__ , indent=2 , sort_keys=lowercase__ ) + "\n" f.write(lowercase__ ) # 5. Clean up shards (for some reason the file PyTorch saves take the same space as the whole state_dict print( "Cleaning up shards. This may error with an OOM error, it this is the case don't worry you still have converted the model." ) a_ =list(shards.keys() ) del state_dict del shards gc.collect() for shard_file in shard_files: a_ =torch.load(os.path.join(lowercase__ , lowercase__ ) ) torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(lowercase__ , lowercase__ ) ) del state_dict gc.collect() if push_to_hub: if model_name is None: raise ValueError("Please provide a `model_name` to push the model to the Hub." ) a_ =AutoModelForCausalLM.from_pretrained(lowercase__ ) model.push_to_hub(lowercase__ , max_shard_size="2GB" ) tokenizer.push_to_hub(lowercase__ ) if __name__ == "__main__": lowercase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--repo_id''', default=None, type=str, required=True, help='''Repo ID from which to pull the checkpoint.''' ) parser.add_argument( '''--checkpoint_file''', default=None, type=str, required=True, help='''Name of the checkpoint file in the repo.''' ) parser.add_argument( '''--output_dir''', default=None, type=str, required=True, help='''Where to save the converted model.''' ) parser.add_argument( '''--tokenizer_file''', default=None, type=str, help='''Path to the tokenizer file to use (if not provided, only the model is converted).''', ) parser.add_argument( '''--size''', default=None, type=str, help='''Size of the model. Will be inferred from the `checkpoint_file` if not passed.''', ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Push to the Hub the converted model.''', ) parser.add_argument( '''--model_name''', default=None, type=str, help='''Name of the pushed model on the Hub, including the username / organization.''', ) lowercase = parser.parse_args() convert_rmkv_checkpoint_to_hf_format( args.repo_id, args.checkpoint_file, args.output_dir, size=args.size, tokenizer_file=args.tokenizer_file, push_to_hub=args.push_to_hub, model_name=args.model_name, )
41
'''simple docstring''' import torch from diffusers import StableDiffusionPipeline lowercase = '''path-to-your-trained-model''' lowercase = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.floataa).to('''cuda''') lowercase = '''A photo of sks dog in a bucket''' lowercase = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0] image.save('''dog-bucket.png''')
41
1
from __future__ import annotations _A = 1.6021e-19 # units = C def lowerCAmelCase_ ( __a , __a , __a , ) -> tuple[str, float]: """simple docstring""" if (conductivity, electron_conc, mobility).count(0 ) != 1: raise ValueError('''You cannot supply more or less than 2 values''' ) elif conductivity < 0: raise ValueError('''Conductivity cannot be negative''' ) elif electron_conc < 0: raise ValueError('''Electron concentration cannot be negative''' ) elif mobility < 0: raise ValueError('''mobility cannot be negative''' ) elif conductivity == 0: return ( "conductivity", mobility * electron_conc * ELECTRON_CHARGE, ) elif electron_conc == 0: return ( "electron_conc", conductivity / (mobility * ELECTRON_CHARGE), ) else: return ( "mobility", conductivity / (electron_conc * ELECTRON_CHARGE), ) if __name__ == "__main__": import doctest doctest.testmod()
258
import numpy # List of input, output pairs _A = ( ((5, 2, 3), 15), ((6, 5, 9), 25), ((11, 12, 13), 41), ((1, 1, 1), 8), ((11, 12, 13), 41), ) _A = (((515, 22, 13), 555), ((61, 35, 49), 150)) _A = [2, 4, 1, 5] _A = len(train_data) _A = 0.009 def lowerCAmelCase_ ( __a , __a="train" ) -> Optional[int]: """simple docstring""" return calculate_hypothesis_value(__a , __a ) - output( __a , __a ) def lowerCAmelCase_ ( __a ) -> Any: """simple docstring""" SCREAMING_SNAKE_CASE : Tuple =0 for i in range(len(__a ) - 1 ): hyp_val += data_input_tuple[i] * parameter_vector[i + 1] hyp_val += parameter_vector[0] return hyp_val def lowerCAmelCase_ ( __a , __a ) -> str: """simple docstring""" if data_set == "train": return train_data[example_no][1] elif data_set == "test": return test_data[example_no][1] return None def lowerCAmelCase_ ( __a , __a ) -> str: """simple docstring""" if data_set == "train": return _hypothesis_value(train_data[example_no][0] ) elif data_set == "test": return _hypothesis_value(test_data[example_no][0] ) return None def lowerCAmelCase_ ( __a , __a=m ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE : Tuple =0 for i in range(__a ): if index == -1: summation_value += _error(__a ) else: summation_value += _error(__a ) * train_data[i][0][index] return summation_value def lowerCAmelCase_ ( __a ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE : str =summation_of_cost_derivative(__a , __a ) / m return cost_derivative_value def lowerCAmelCase_ ( ) -> Tuple: """simple docstring""" global parameter_vector # Tune these values to set a tolerance value for predicted output SCREAMING_SNAKE_CASE : Tuple =0.000002 SCREAMING_SNAKE_CASE : Optional[Any] =0 SCREAMING_SNAKE_CASE : Tuple =0 while True: j += 1 SCREAMING_SNAKE_CASE : List[str] =[0, 0, 0, 0] for i in range(0 , len(__a ) ): SCREAMING_SNAKE_CASE : Tuple =get_cost_derivative(i - 1 ) SCREAMING_SNAKE_CASE : Tuple =( parameter_vector[i] - LEARNING_RATE * cost_derivative ) if numpy.allclose( __a , __a , atol=__a , rtol=__a , ): break SCREAMING_SNAKE_CASE : Union[str, Any] =temp_parameter_vector print(('''Number of iterations:''', j) ) def lowerCAmelCase_ ( ) -> int: """simple docstring""" for i in range(len(__a ) ): print(('''Actual output value:''', output(__a , '''test''' )) ) print(('''Hypothesis output:''', calculate_hypothesis_value(__a , '''test''' )) ) if __name__ == "__main__": run_gradient_descent() print("""\nTesting gradient descent for a linear hypothesis function.\n""") test_gradient_descent()
258
1
'''simple docstring''' import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings from diffusers.utils import load_numpy, slow, torch_device from diffusers.utils.testing_utils import require_torch_gpu lowerCAmelCase__ = False class __lowercase (unittest.TestCase ): def __UpperCamelCase ( self : Optional[Any]): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def __UpperCamelCase ( self : int): return 12 @property def __UpperCamelCase ( self : Tuple): return 12 @property def __UpperCamelCase ( self : Dict): return 32 @property def __UpperCamelCase ( self : Optional[int]): torch.manual_seed(0) UpperCamelCase__ : List[Any] = VQModel( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , ) return model @property def __UpperCamelCase ( self : Optional[Any]): UpperCamelCase__ : Any = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip') return tokenizer @property def __UpperCamelCase ( self : List[str]): torch.manual_seed(0) UpperCamelCase__ : Optional[int] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) return CLIPTextModel(UpperCAmelCase_) @property def __UpperCamelCase ( self : Optional[int]): torch.manual_seed(0) UpperCamelCase__ : List[Any] = 12 UpperCamelCase__ : Dict = 12 UpperCamelCase__ : Union[str, Any] = { 'attention_bias': True, 'cross_attention_dim': 32, 'attention_head_dim': height * width, 'num_attention_heads': 1, 'num_vector_embeds': self.num_embed, 'num_embeds_ada_norm': self.num_embeds_ada_norm, 'norm_num_groups': 32, 'sample_size': width, 'activation_fn': 'geglu-approximate', } UpperCamelCase__ : Tuple = TransformeraDModel(**UpperCAmelCase_) return model def __UpperCamelCase ( self : int): UpperCamelCase__ : List[Any] = 'cpu' UpperCamelCase__ : List[str] = self.dummy_vqvae UpperCamelCase__ : List[str] = self.dummy_text_encoder UpperCamelCase__ : Optional[int] = self.dummy_tokenizer UpperCamelCase__ : List[str] = self.dummy_transformer UpperCamelCase__ : Dict = VQDiffusionScheduler(self.num_embed) UpperCamelCase__ : List[Any] = LearnedClassifierFreeSamplingEmbeddings(learnable=UpperCAmelCase_) UpperCamelCase__ : int = VQDiffusionPipeline( vqvae=UpperCAmelCase_ , text_encoder=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , transformer=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , learned_classifier_free_sampling_embeddings=UpperCAmelCase_ , ) UpperCamelCase__ : Optional[Any] = pipe.to(UpperCAmelCase_) pipe.set_progress_bar_config(disable=UpperCAmelCase_) UpperCamelCase__ : Optional[Any] = 'teddy bear playing in the pool' UpperCamelCase__ : Dict = torch.Generator(device=UpperCAmelCase_).manual_seed(0) UpperCamelCase__ : Any = pipe([prompt] , generator=UpperCAmelCase_ , num_inference_steps=2 , output_type='np') UpperCamelCase__ : Optional[Any] = output.images UpperCamelCase__ : int = torch.Generator(device=UpperCAmelCase_).manual_seed(0) UpperCamelCase__ : Any = pipe( [prompt] , generator=UpperCAmelCase_ , output_type='np' , return_dict=UpperCAmelCase_ , num_inference_steps=2)[0] UpperCamelCase__ : Optional[Any] = image[0, -3:, -3:, -1] UpperCamelCase__ : Any = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 24, 24, 3) UpperCamelCase__ : Any = np.array([0.65_51, 0.61_68, 0.50_08, 0.56_76, 0.56_59, 0.42_95, 0.60_73, 0.55_99, 0.49_92]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 def __UpperCamelCase ( self : Optional[int]): UpperCamelCase__ : Optional[int] = 'cpu' UpperCamelCase__ : str = self.dummy_vqvae UpperCamelCase__ : Any = self.dummy_text_encoder UpperCamelCase__ : List[Any] = self.dummy_tokenizer UpperCamelCase__ : Dict = self.dummy_transformer UpperCamelCase__ : Optional[Any] = VQDiffusionScheduler(self.num_embed) UpperCamelCase__ : Optional[Any] = LearnedClassifierFreeSamplingEmbeddings( learnable=UpperCAmelCase_ , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length) UpperCamelCase__ : str = VQDiffusionPipeline( vqvae=UpperCAmelCase_ , text_encoder=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , transformer=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , learned_classifier_free_sampling_embeddings=UpperCAmelCase_ , ) UpperCamelCase__ : str = pipe.to(UpperCAmelCase_) pipe.set_progress_bar_config(disable=UpperCAmelCase_) UpperCamelCase__ : List[Any] = 'teddy bear playing in the pool' UpperCamelCase__ : Union[str, Any] = torch.Generator(device=UpperCAmelCase_).manual_seed(0) UpperCamelCase__ : Any = pipe([prompt] , generator=UpperCAmelCase_ , num_inference_steps=2 , output_type='np') UpperCamelCase__ : int = output.images UpperCamelCase__ : List[Any] = torch.Generator(device=UpperCAmelCase_).manual_seed(0) UpperCamelCase__ : Optional[Any] = pipe( [prompt] , generator=UpperCAmelCase_ , output_type='np' , return_dict=UpperCAmelCase_ , num_inference_steps=2)[0] UpperCamelCase__ : Union[str, Any] = image[0, -3:, -3:, -1] UpperCamelCase__ : Dict = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 24, 24, 3) UpperCamelCase__ : str = np.array([0.66_93, 0.60_75, 0.49_59, 0.57_01, 0.55_83, 0.43_33, 0.61_71, 0.56_84, 0.49_88]) assert np.abs(image_slice.flatten() - expected_slice).max() < 2.0 assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 @slow @require_torch_gpu class __lowercase (unittest.TestCase ): def __UpperCamelCase ( self : Any): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __UpperCamelCase ( self : List[Any]): UpperCamelCase__ : Optional[Any] = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy') UpperCamelCase__ : List[Any] = VQDiffusionPipeline.from_pretrained('microsoft/vq-diffusion-ithq') UpperCamelCase__ : Any = pipeline.to(UpperCAmelCase_) pipeline.set_progress_bar_config(disable=UpperCAmelCase_) # requires GPU generator for gumbel softmax # don't use GPU generator in tests though UpperCamelCase__ : Optional[int] = torch.Generator(device=UpperCAmelCase_).manual_seed(0) UpperCamelCase__ : int = pipeline( 'teddy bear playing in the pool' , num_images_per_prompt=1 , generator=UpperCAmelCase_ , output_type='np' , ) UpperCamelCase__ : int = output.images[0] assert image.shape == (256, 256, 3) assert np.abs(expected_image - image).max() < 2.0
6
'''simple docstring''' import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings from diffusers.utils import load_numpy, slow, torch_device from diffusers.utils.testing_utils import require_torch_gpu lowerCAmelCase__ = False class __lowercase (unittest.TestCase ): def __UpperCamelCase ( self : Optional[Any]): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def __UpperCamelCase ( self : int): return 12 @property def __UpperCamelCase ( self : Tuple): return 12 @property def __UpperCamelCase ( self : Dict): return 32 @property def __UpperCamelCase ( self : Optional[int]): torch.manual_seed(0) UpperCamelCase__ : List[Any] = VQModel( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , ) return model @property def __UpperCamelCase ( self : Optional[Any]): UpperCamelCase__ : Any = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip') return tokenizer @property def __UpperCamelCase ( self : List[str]): torch.manual_seed(0) UpperCamelCase__ : Optional[int] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) return CLIPTextModel(UpperCAmelCase_) @property def __UpperCamelCase ( self : Optional[int]): torch.manual_seed(0) UpperCamelCase__ : List[Any] = 12 UpperCamelCase__ : Dict = 12 UpperCamelCase__ : Union[str, Any] = { 'attention_bias': True, 'cross_attention_dim': 32, 'attention_head_dim': height * width, 'num_attention_heads': 1, 'num_vector_embeds': self.num_embed, 'num_embeds_ada_norm': self.num_embeds_ada_norm, 'norm_num_groups': 32, 'sample_size': width, 'activation_fn': 'geglu-approximate', } UpperCamelCase__ : Tuple = TransformeraDModel(**UpperCAmelCase_) return model def __UpperCamelCase ( self : int): UpperCamelCase__ : List[Any] = 'cpu' UpperCamelCase__ : List[str] = self.dummy_vqvae UpperCamelCase__ : List[str] = self.dummy_text_encoder UpperCamelCase__ : Optional[int] = self.dummy_tokenizer UpperCamelCase__ : List[str] = self.dummy_transformer UpperCamelCase__ : Dict = VQDiffusionScheduler(self.num_embed) UpperCamelCase__ : List[Any] = LearnedClassifierFreeSamplingEmbeddings(learnable=UpperCAmelCase_) UpperCamelCase__ : int = VQDiffusionPipeline( vqvae=UpperCAmelCase_ , text_encoder=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , transformer=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , learned_classifier_free_sampling_embeddings=UpperCAmelCase_ , ) UpperCamelCase__ : Optional[Any] = pipe.to(UpperCAmelCase_) pipe.set_progress_bar_config(disable=UpperCAmelCase_) UpperCamelCase__ : Optional[Any] = 'teddy bear playing in the pool' UpperCamelCase__ : Dict = torch.Generator(device=UpperCAmelCase_).manual_seed(0) UpperCamelCase__ : Any = pipe([prompt] , generator=UpperCAmelCase_ , num_inference_steps=2 , output_type='np') UpperCamelCase__ : Optional[Any] = output.images UpperCamelCase__ : int = torch.Generator(device=UpperCAmelCase_).manual_seed(0) UpperCamelCase__ : Any = pipe( [prompt] , generator=UpperCAmelCase_ , output_type='np' , return_dict=UpperCAmelCase_ , num_inference_steps=2)[0] UpperCamelCase__ : Optional[Any] = image[0, -3:, -3:, -1] UpperCamelCase__ : Any = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 24, 24, 3) UpperCamelCase__ : Any = np.array([0.65_51, 0.61_68, 0.50_08, 0.56_76, 0.56_59, 0.42_95, 0.60_73, 0.55_99, 0.49_92]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 def __UpperCamelCase ( self : Optional[int]): UpperCamelCase__ : Optional[int] = 'cpu' UpperCamelCase__ : str = self.dummy_vqvae UpperCamelCase__ : Any = self.dummy_text_encoder UpperCamelCase__ : List[Any] = self.dummy_tokenizer UpperCamelCase__ : Dict = self.dummy_transformer UpperCamelCase__ : Optional[Any] = VQDiffusionScheduler(self.num_embed) UpperCamelCase__ : Optional[Any] = LearnedClassifierFreeSamplingEmbeddings( learnable=UpperCAmelCase_ , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length) UpperCamelCase__ : str = VQDiffusionPipeline( vqvae=UpperCAmelCase_ , text_encoder=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , transformer=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , learned_classifier_free_sampling_embeddings=UpperCAmelCase_ , ) UpperCamelCase__ : str = pipe.to(UpperCAmelCase_) pipe.set_progress_bar_config(disable=UpperCAmelCase_) UpperCamelCase__ : List[Any] = 'teddy bear playing in the pool' UpperCamelCase__ : Union[str, Any] = torch.Generator(device=UpperCAmelCase_).manual_seed(0) UpperCamelCase__ : Any = pipe([prompt] , generator=UpperCAmelCase_ , num_inference_steps=2 , output_type='np') UpperCamelCase__ : int = output.images UpperCamelCase__ : List[Any] = torch.Generator(device=UpperCAmelCase_).manual_seed(0) UpperCamelCase__ : Optional[Any] = pipe( [prompt] , generator=UpperCAmelCase_ , output_type='np' , return_dict=UpperCAmelCase_ , num_inference_steps=2)[0] UpperCamelCase__ : Union[str, Any] = image[0, -3:, -3:, -1] UpperCamelCase__ : Dict = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 24, 24, 3) UpperCamelCase__ : str = np.array([0.66_93, 0.60_75, 0.49_59, 0.57_01, 0.55_83, 0.43_33, 0.61_71, 0.56_84, 0.49_88]) assert np.abs(image_slice.flatten() - expected_slice).max() < 2.0 assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 @slow @require_torch_gpu class __lowercase (unittest.TestCase ): def __UpperCamelCase ( self : Any): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __UpperCamelCase ( self : List[Any]): UpperCamelCase__ : Optional[Any] = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy') UpperCamelCase__ : List[Any] = VQDiffusionPipeline.from_pretrained('microsoft/vq-diffusion-ithq') UpperCamelCase__ : Any = pipeline.to(UpperCAmelCase_) pipeline.set_progress_bar_config(disable=UpperCAmelCase_) # requires GPU generator for gumbel softmax # don't use GPU generator in tests though UpperCamelCase__ : Optional[int] = torch.Generator(device=UpperCAmelCase_).manual_seed(0) UpperCamelCase__ : int = pipeline( 'teddy bear playing in the pool' , num_images_per_prompt=1 , generator=UpperCAmelCase_ , output_type='np' , ) UpperCamelCase__ : int = output.images[0] assert image.shape == (256, 256, 3) assert np.abs(expected_image - image).max() < 2.0
6
1
def __magic_name__ ( lowerCAmelCase_): '''simple docstring''' lowerCamelCase_ : int = len(lowerCAmelCase_) for i in range(length - 1): lowerCamelCase_ : int = i for k in range(i + 1 , lowerCAmelCase_): if collection[k] < collection[least]: lowerCamelCase_ : Optional[int] = k if least != i: lowerCamelCase_ ,lowerCamelCase_ : Union[str, Any] = (collection[i], collection[least]) return collection if __name__ == "__main__": __magic_name__ = input('''Enter numbers separated by a comma:\n''').strip() __magic_name__ = [int(item) for item in user_input.split(''',''')] print(selection_sort(unsorted))
250
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available __magic_name__ = { '''configuration_conditional_detr''': [ '''CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ConditionalDetrConfig''', '''ConditionalDetrOnnxConfig''', ] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __magic_name__ = ['''ConditionalDetrFeatureExtractor'''] __magic_name__ = ['''ConditionalDetrImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __magic_name__ = [ '''CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST''', '''ConditionalDetrForObjectDetection''', '''ConditionalDetrForSegmentation''', '''ConditionalDetrModel''', '''ConditionalDetrPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_conditional_detr import ( CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP, ConditionalDetrConfig, ConditionalDetrOnnxConfig, ) try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor from .image_processing_conditional_detr import ConditionalDetrImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_conditional_detr import ( CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST, ConditionalDetrForObjectDetection, ConditionalDetrForSegmentation, ConditionalDetrModel, ConditionalDetrPreTrainedModel, ) else: import sys __magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
250
1
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging _A = logging.get_logger(__name__) _A = { 's-JoL/Open-Llama-V1': 'https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json', } class UpperCAmelCase__ ( _snake_case ): """simple docstring""" A : Optional[Any] = '''open-llama''' def __init__(self , _a=100_000 , _a=4_096 , _a=11_008 , _a=32 , _a=32 , _a="silu" , _a=2_048 , _a=0.02 , _a=1e-6 , _a=True , _a=0 , _a=1 , _a=2 , _a=False , _a=True , _a=0.1 , _a=0.1 , _a=True , _a=True , _a=None , **_a , ) -> str: lowercase_ : List[Any] = vocab_size lowercase_ : Union[str, Any] = max_position_embeddings lowercase_ : List[str] = hidden_size lowercase_ : Optional[Any] = intermediate_size lowercase_ : Optional[int] = num_hidden_layers lowercase_ : Any = num_attention_heads lowercase_ : List[Any] = hidden_act lowercase_ : Dict = initializer_range lowercase_ : List[str] = rms_norm_eps lowercase_ : Optional[int] = use_cache lowercase_ : Optional[Any] = kwargs.pop( 'use_memorry_efficient_attention' , _a ) lowercase_ : Optional[Any] = hidden_dropout_prob lowercase_ : str = attention_dropout_prob lowercase_ : List[str] = use_stable_embedding lowercase_ : int = shared_input_output_embedding lowercase_ : Union[str, Any] = rope_scaling self._rope_scaling_validation() super().__init__( pad_token_id=_a , bos_token_id=_a , eos_token_id=_a , tie_word_embeddings=_a , **_a , ) def _lowerCamelCase (self ) -> int: if self.rope_scaling is None: return if not isinstance(self.rope_scaling , _a ) or len(self.rope_scaling ) != 2: raise ValueError( '`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, ' f'''got {self.rope_scaling}''' ) lowercase_ : int = self.rope_scaling.get('type' , _a ) lowercase_ : Optional[int] = self.rope_scaling.get('factor' , _a ) if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]: raise ValueError( f'''`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' ) if rope_scaling_factor is None or not isinstance(_a , _a ) or rope_scaling_factor <= 1.0: raise ValueError(f'''`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}''' )
704
'''simple docstring''' from decimal import Decimal, getcontext from math import ceil, factorial def _UpperCamelCase ( SCREAMING_SNAKE_CASE_ ): if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): raise TypeError('Undefined for non-integers' ) elif precision < 1: raise ValueError('Undefined for non-natural numbers' ) lowercase_ : str = precision lowercase_ : List[str] = ceil(precision / 14 ) lowercase_ : Union[str, Any] = 426_880 * Decimal(10_005 ).sqrt() lowercase_ : List[Any] = 1 lowercase_ : Optional[int] = 13_591_409 lowercase_ : Dict = Decimal(SCREAMING_SNAKE_CASE_ ) for k in range(1 , SCREAMING_SNAKE_CASE_ ): lowercase_ : List[str] = factorial(6 * k ) // (factorial(3 * k ) * factorial(SCREAMING_SNAKE_CASE_ ) ** 3) linear_term += 545_140_134 exponential_term *= -262_537_412_640_768_000 partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term return str(constant_term / partial_sum )[:-1] if __name__ == "__main__": _A = 5_0 print(F"""The first {n} digits of pi is: {pi(n)}""")
438
0
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import cached_download, hf_hub_url from PIL import Image from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor from transformers.utils import logging logging.set_verbosity_info() UpperCamelCase = logging.get_logger(__name__) def _A ( lowerCAmelCase_ : str ): """simple docstring""" lowerCAmelCase__ = DPTConfig(embedding_type="hybrid" ) if "large" in checkpoint_url: lowerCAmelCase__ = 1024 lowerCAmelCase__ = 4096 lowerCAmelCase__ = 24 lowerCAmelCase__ = 16 lowerCAmelCase__ = [5, 11, 17, 23] lowerCAmelCase__ = [256, 512, 1024, 1024] lowerCAmelCase__ = (1, 384, 384) if "nyu" or "midas" in checkpoint_url: lowerCAmelCase__ = 768 lowerCAmelCase__ = [1, 1, 1, 0.5] lowerCAmelCase__ = [256, 512, 768, 768] lowerCAmelCase__ = 150 lowerCAmelCase__ = 16 lowerCAmelCase__ = (1, 384, 384) lowerCAmelCase__ = False lowerCAmelCase__ = "project" if "ade" in checkpoint_url: lowerCAmelCase__ = True lowerCAmelCase__ = 768 lowerCAmelCase__ = [1, 1, 1, 0.5] lowerCAmelCase__ = 150 lowerCAmelCase__ = 16 lowerCAmelCase__ = "huggingface/label-files" lowerCAmelCase__ = "ade20k-id2label.json" lowerCAmelCase__ = json.load(open(cached_download(hf_hub_url(lowerCAmelCase_ , lowerCAmelCase_ , repo_type="dataset" ) ) , "r" ) ) lowerCAmelCase__ = {int(lowerCAmelCase_ ): v for k, v in idalabel.items()} lowerCAmelCase__ = idalabel lowerCAmelCase__ = {v: k for k, v in idalabel.items()} lowerCAmelCase__ = [1, 150, 480, 480] return config, expected_shape def _A ( lowerCAmelCase_ : List[Any] ): """simple docstring""" lowerCAmelCase__ = ["pretrained.model.head.weight", "pretrained.model.head.bias"] for k in ignore_keys: state_dict.pop(lowerCAmelCase_ , lowerCAmelCase_ ) def _A ( lowerCAmelCase_ : Optional[int] ): """simple docstring""" if ( "pretrained.model" in name and "cls_token" not in name and "pos_embed" not in name and "patch_embed" not in name ): lowerCAmelCase__ = name.replace("pretrained.model" , "dpt.encoder" ) if "pretrained.model" in name: lowerCAmelCase__ = name.replace("pretrained.model" , "dpt.embeddings" ) if "patch_embed" in name: lowerCAmelCase__ = name.replace("patch_embed" , "" ) if "pos_embed" in name: lowerCAmelCase__ = name.replace("pos_embed" , "position_embeddings" ) if "attn.proj" in name: lowerCAmelCase__ = name.replace("attn.proj" , "attention.output.dense" ) if "proj" in name and "project" not in name: lowerCAmelCase__ = name.replace("proj" , "projection" ) if "blocks" in name: lowerCAmelCase__ = name.replace("blocks" , "layer" ) if "mlp.fc1" in name: lowerCAmelCase__ = name.replace("mlp.fc1" , "intermediate.dense" ) if "mlp.fc2" in name: lowerCAmelCase__ = name.replace("mlp.fc2" , "output.dense" ) if "norm1" in name and "backbone" not in name: lowerCAmelCase__ = name.replace("norm1" , "layernorm_before" ) if "norm2" in name and "backbone" not in name: lowerCAmelCase__ = name.replace("norm2" , "layernorm_after" ) if "scratch.output_conv" in name: lowerCAmelCase__ = name.replace("scratch.output_conv" , "head" ) if "scratch" in name: lowerCAmelCase__ = name.replace("scratch" , "neck" ) if "layer1_rn" in name: lowerCAmelCase__ = name.replace("layer1_rn" , "convs.0" ) if "layer2_rn" in name: lowerCAmelCase__ = name.replace("layer2_rn" , "convs.1" ) if "layer3_rn" in name: lowerCAmelCase__ = name.replace("layer3_rn" , "convs.2" ) if "layer4_rn" in name: lowerCAmelCase__ = name.replace("layer4_rn" , "convs.3" ) if "refinenet" in name: lowerCAmelCase__ = int(name[len("neck.refinenet" ) : len("neck.refinenet" ) + 1] ) # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3 lowerCAmelCase__ = name.replace(F'refinenet{layer_idx}' , F'fusion_stage.layers.{abs(layer_idx-4 )}' ) if "out_conv" in name: lowerCAmelCase__ = name.replace("out_conv" , "projection" ) if "resConfUnit1" in name: lowerCAmelCase__ = name.replace("resConfUnit1" , "residual_layer1" ) if "resConfUnit2" in name: lowerCAmelCase__ = name.replace("resConfUnit2" , "residual_layer2" ) if "conv1" in name: lowerCAmelCase__ = name.replace("conv1" , "convolution1" ) if "conv2" in name: lowerCAmelCase__ = name.replace("conv2" , "convolution2" ) # readout blocks if "pretrained.act_postprocess1.0.project.0" in name: lowerCAmelCase__ = name.replace("pretrained.act_postprocess1.0.project.0" , "neck.reassemble_stage.readout_projects.0.0" ) if "pretrained.act_postprocess2.0.project.0" in name: lowerCAmelCase__ = name.replace("pretrained.act_postprocess2.0.project.0" , "neck.reassemble_stage.readout_projects.1.0" ) if "pretrained.act_postprocess3.0.project.0" in name: lowerCAmelCase__ = name.replace("pretrained.act_postprocess3.0.project.0" , "neck.reassemble_stage.readout_projects.2.0" ) if "pretrained.act_postprocess4.0.project.0" in name: lowerCAmelCase__ = name.replace("pretrained.act_postprocess4.0.project.0" , "neck.reassemble_stage.readout_projects.3.0" ) # resize blocks if "pretrained.act_postprocess1.3" in name: lowerCAmelCase__ = name.replace("pretrained.act_postprocess1.3" , "neck.reassemble_stage.layers.0.projection" ) if "pretrained.act_postprocess1.4" in name: lowerCAmelCase__ = name.replace("pretrained.act_postprocess1.4" , "neck.reassemble_stage.layers.0.resize" ) if "pretrained.act_postprocess2.3" in name: lowerCAmelCase__ = name.replace("pretrained.act_postprocess2.3" , "neck.reassemble_stage.layers.1.projection" ) if "pretrained.act_postprocess2.4" in name: lowerCAmelCase__ = name.replace("pretrained.act_postprocess2.4" , "neck.reassemble_stage.layers.1.resize" ) if "pretrained.act_postprocess3.3" in name: lowerCAmelCase__ = name.replace("pretrained.act_postprocess3.3" , "neck.reassemble_stage.layers.2.projection" ) if "pretrained.act_postprocess4.3" in name: lowerCAmelCase__ = name.replace("pretrained.act_postprocess4.3" , "neck.reassemble_stage.layers.3.projection" ) if "pretrained.act_postprocess4.4" in name: lowerCAmelCase__ = name.replace("pretrained.act_postprocess4.4" , "neck.reassemble_stage.layers.3.resize" ) if "pretrained" in name: lowerCAmelCase__ = name.replace("pretrained" , "dpt" ) if "bn" in name: lowerCAmelCase__ = name.replace("bn" , "batch_norm" ) if "head" in name: lowerCAmelCase__ = name.replace("head" , "head.head" ) if "encoder.norm" in name: lowerCAmelCase__ = name.replace("encoder.norm" , "layernorm" ) if "auxlayer" in name: lowerCAmelCase__ = name.replace("auxlayer" , "auxiliary_head.head" ) if "backbone" in name: lowerCAmelCase__ = name.replace("backbone" , "backbone.bit.encoder" ) if ".." in name: lowerCAmelCase__ = name.replace(".." , "." ) if "stem.conv" in name: lowerCAmelCase__ = name.replace("stem.conv" , "bit.embedder.convolution" ) if "blocks" in name: lowerCAmelCase__ = name.replace("blocks" , "layers" ) if "convolution" in name and "backbone" in name: lowerCAmelCase__ = name.replace("convolution" , "conv" ) if "layer" in name and "backbone" in name: lowerCAmelCase__ = name.replace("layer" , "layers" ) if "backbone.bit.encoder.bit" in name: lowerCAmelCase__ = name.replace("backbone.bit.encoder.bit" , "backbone.bit" ) if "embedder.conv" in name: lowerCAmelCase__ = name.replace("embedder.conv" , "embedder.convolution" ) if "backbone.bit.encoder.stem.norm" in name: lowerCAmelCase__ = name.replace("backbone.bit.encoder.stem.norm" , "backbone.bit.embedder.norm" ) return name def _A ( lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Optional[Any] ): """simple docstring""" for i in range(config.num_hidden_layers ): # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) lowerCAmelCase__ = state_dict.pop(F'dpt.encoder.layer.{i}.attn.qkv.weight' ) lowerCAmelCase__ = state_dict.pop(F'dpt.encoder.layer.{i}.attn.qkv.bias' ) # next, add query, keys and values (in that order) to the state dict lowerCAmelCase__ = in_proj_weight[: config.hidden_size, :] lowerCAmelCase__ = in_proj_bias[: config.hidden_size] lowerCAmelCase__ = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] lowerCAmelCase__ = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] lowerCAmelCase__ = in_proj_weight[ -config.hidden_size :, : ] lowerCAmelCase__ = in_proj_bias[-config.hidden_size :] def _A ( ): """simple docstring""" lowerCAmelCase__ = "http://images.cocodataset.org/val2017/000000039769.jpg" lowerCAmelCase__ = Image.open(requests.get(lowerCAmelCase_ , stream=lowerCAmelCase_ ).raw ) return im @torch.no_grad() def _A ( lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Any , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[str] ): """simple docstring""" lowerCAmelCase__ , lowerCAmelCase__ = get_dpt_config(lowerCAmelCase_ ) # load original state_dict from URL # state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu") lowerCAmelCase__ = torch.load(lowerCAmelCase_ , map_location="cpu" ) # remove certain keys remove_ignore_keys_(lowerCAmelCase_ ) # rename keys for key in state_dict.copy().keys(): lowerCAmelCase__ = state_dict.pop(lowerCAmelCase_ ) lowerCAmelCase__ = val # read in qkv matrices read_in_q_k_v(lowerCAmelCase_ , lowerCAmelCase_ ) # load HuggingFace model lowerCAmelCase__ = DPTForSemanticSegmentation(lowerCAmelCase_ ) if "ade" in checkpoint_url else DPTForDepthEstimation(lowerCAmelCase_ ) model.load_state_dict(lowerCAmelCase_ ) model.eval() # Check outputs on an image lowerCAmelCase__ = 480 if "ade" in checkpoint_url else 384 lowerCAmelCase__ = DPTImageProcessor(size=lowerCAmelCase_ ) lowerCAmelCase__ = prepare_img() lowerCAmelCase__ = image_processor(lowerCAmelCase_ , return_tensors="pt" ) # forward pass lowerCAmelCase__ = model(**lowerCAmelCase_ ).logits if "ade" in checkpoint_url else model(**lowerCAmelCase_ ).predicted_depth if show_prediction: lowerCAmelCase__ = ( torch.nn.functional.interpolate( outputs.unsqueeze(1 ) , size=(image.size[1], image.size[0]) , mode="bicubic" , align_corners=lowerCAmelCase_ , ) .squeeze() .cpu() .numpy() ) Image.fromarray((prediction / prediction.max()) * 255 ).show() if pytorch_dump_folder_path is not None: Path(lowerCAmelCase_ ).mkdir(exist_ok=lowerCAmelCase_ ) print(F'Saving model to {pytorch_dump_folder_path}' ) model.save_pretrained(lowerCAmelCase_ ) print(F'Saving image processor to {pytorch_dump_folder_path}' ) image_processor.save_pretrained(lowerCAmelCase_ ) if push_to_hub: model.push_to_hub("ybelkada/dpt-hybrid-midas" ) image_processor.push_to_hub("ybelkada/dpt-hybrid-midas" ) if __name__ == "__main__": UpperCamelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( '--checkpoint_url', default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt', type=str, help='URL of the original DPT checkpoint you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=False, help='Path to the output PyTorch model directory.', ) parser.add_argument( '--push_to_hub', action='store_true', ) parser.add_argument( '--model_name', default='dpt-large', type=str, help='Name of the model, in case you\'re pushing to the hub.', ) parser.add_argument( '--show_prediction', action='store_true', ) UpperCamelCase = parser.parse_args() convert_dpt_checkpoint( args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction )
61
import inspect import re from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_config_docstrings.py __a = '''src/transformers''' # This is to make sure the transformers module imported is the one in the repo. __a = direct_transformers_import(PATH_TO_TRANSFORMERS) __a = transformers.models.auto.configuration_auto.CONFIG_MAPPING # Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`. # For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)` __a = re.compile(r'''\[(.+?)\]\((https://huggingface\.co/.+?)\)''') __a = { '''DecisionTransformerConfig''', '''EncoderDecoderConfig''', '''MusicgenConfig''', '''RagConfig''', '''SpeechEncoderDecoderConfig''', '''TimmBackboneConfig''', '''VisionEncoderDecoderConfig''', '''VisionTextDualEncoderConfig''', '''LlamaConfig''', } def __lowercase ( _UpperCamelCase ) ->Any: """simple docstring""" lowercase : Tuple = None # source code of `config_class` lowercase : Dict = inspect.getsource(_UpperCamelCase ) lowercase : List[str] = _re_checkpoint.findall(_UpperCamelCase ) # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link. # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')` for ckpt_name, ckpt_link in checkpoints: # allow the link to end with `/` if ckpt_link.endswith('''/''' ): lowercase : List[str] = ckpt_link[:-1] # verify the checkpoint name corresponds to the checkpoint link lowercase : List[str] = f"""https://huggingface.co/{ckpt_name}""" if ckpt_link == ckpt_link_from_name: lowercase : Dict = ckpt_name break return checkpoint def __lowercase ( ) ->str: """simple docstring""" lowercase : str = [] for config_class in list(CONFIG_MAPPING.values() ): # Skip deprecated models if "models.deprecated" in config_class.__module__: continue lowercase : Optional[int] = get_checkpoint_from_config_class(_UpperCamelCase ) lowercase : Union[str, Any] = config_class.__name__ if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK: configs_without_checkpoint.append(_UpperCamelCase ) if len(_UpperCamelCase ) > 0: lowercase : Any = '''\n'''.join(sorted(_UpperCamelCase ) ) raise ValueError(f"""The following configurations don't contain any valid checkpoint:\n{message}""" ) if __name__ == "__main__": check_config_docstrings_have_checkpoints()
319
0
def __lowerCamelCase ( __a : str ) -> list: return [ txt[:a] + txt[a].upper() + txt[a + 1 :] for a in range(len(__a ) ) if txt[a].isalpha() ] if __name__ == "__main__": __import__("doctest").testmod()
594
from math import factorial def __lowerCamelCase ( __a : int , __a : int , __a : float ) -> float: if successes > trials: raise ValueError("successes must be lower or equal to trials" ) if trials < 0 or successes < 0: raise ValueError("the function is defined for non-negative integers" ) if not isinstance(__a , __a ) or not isinstance(__a , __a ): raise ValueError("the function is defined for non-negative integers" ) if not 0 < prob < 1: raise ValueError("prob has to be in range of 1 - 0" ) _lowercase =(prob**successes) * ((1 - prob) ** (trials - successes)) # Calculate the binomial coefficient: n! / k!(n-k)! _lowercase =float(factorial(__a ) ) coefficient /= factorial(__a ) * factorial(trials - successes ) return probability * coefficient if __name__ == "__main__": from doctest import testmod testmod() print("Probability of 2 successes out of 4 trails") print("with probability of 0.75 is:", end=" ") print(binomial_distribution(2, 4, 0.7_5))
594
1
from datetime import datetime import requests def _UpperCamelCase ( lowerCAmelCase_ ) ->bytes: UpperCAmelCase = """https://downloadgram.net/wp-json/wppress/video-downloader/video?url=""" UpperCAmelCase = requests.get(base_url + url ).json()[0]["""urls"""][0]["""src"""] return requests.get(lowerCAmelCase_ ).content if __name__ == "__main__": __a = input("""Enter Video/IGTV url: """).strip() __a = F"""{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4""" with open(file_name, """wb""") as fp: fp.write(download_video(url)) print(F"""Done. Video saved to disk as {file_name}.""")
377
def _UpperCamelCase ( lowerCAmelCase_ ) ->Any: UpperCAmelCase = 0 UpperCAmelCase = len(lowerCAmelCase_ ) for i in range(n - 1 ): for j in range(i + 1 , lowerCAmelCase_ ): if arr[i] > arr[j]: num_inversions += 1 return num_inversions def _UpperCamelCase ( lowerCAmelCase_ ) ->Any: if len(lowerCAmelCase_ ) <= 1: return arr, 0 UpperCAmelCase = len(lowerCAmelCase_ ) // 2 UpperCAmelCase = arr[0:mid] UpperCAmelCase = arr[mid:] UpperCAmelCase , UpperCAmelCase = count_inversions_recursive(lowerCAmelCase_ ) UpperCAmelCase , UpperCAmelCase = count_inversions_recursive(lowerCAmelCase_ ) UpperCAmelCase , UpperCAmelCase = _count_cross_inversions(lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase = inversion_p + inversions_q + cross_inversions return c, num_inversions def _UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) ->int: UpperCAmelCase = [] UpperCAmelCase = UpperCAmelCase = UpperCAmelCase = 0 while i < len(lowerCAmelCase_ ) and j < len(lowerCAmelCase_ ): if p[i] > q[j]: # if P[1] > Q[j], then P[k] > Q[k] for all i < k <= len(P) # These are all inversions. The claim emerges from the # property that P is sorted. num_inversion += len(lowerCAmelCase_ ) - i r.append(q[j] ) j += 1 else: r.append(p[i] ) i += 1 if i < len(lowerCAmelCase_ ): r.extend(p[i:] ) else: r.extend(q[j:] ) return r, num_inversion def _UpperCamelCase ( ) ->int: UpperCAmelCase = [1_0, 2, 1, 5, 5, 2, 1_1] # this arr has 8 inversions: # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2) UpperCAmelCase = count_inversions_bf(lowerCAmelCase_ ) UpperCAmelCase , UpperCAmelCase = count_inversions_recursive(lowerCAmelCase_ ) assert num_inversions_bf == num_inversions_recursive == 8 print("""number of inversions = """ , lowerCAmelCase_ ) # testing an array with zero inversion (a sorted arr_1) arr_a.sort() UpperCAmelCase = count_inversions_bf(lowerCAmelCase_ ) UpperCAmelCase , UpperCAmelCase = count_inversions_recursive(lowerCAmelCase_ ) assert num_inversions_bf == num_inversions_recursive == 0 print("""number of inversions = """ , lowerCAmelCase_ ) # an empty list should also have zero inversions UpperCAmelCase = [] UpperCAmelCase = count_inversions_bf(lowerCAmelCase_ ) UpperCAmelCase , UpperCAmelCase = count_inversions_recursive(lowerCAmelCase_ ) assert num_inversions_bf == num_inversions_recursive == 0 print("""number of inversions = """ , lowerCAmelCase_ ) if __name__ == "__main__": main()
377
1
"""simple docstring""" def _UpperCAmelCase ( __lowerCamelCase : int ) -> bool: return sum(i for i in range(1 , number // 2 + 1 ) if number % i == 0 ) == number if __name__ == "__main__": print('Program to check whether a number is a Perfect number or not...') UpperCAmelCase__ = int(input('Enter number: ').strip()) print(F"{number} is {'' if perfect(number) else 'not '}a Perfect Number.")
707
"""simple docstring""" import argparse import os import re import tensorflow as tf import torch from transformers import BertConfig, BertModel from transformers.utils import logging logging.set_verbosity_info() UpperCAmelCase__ = logging.get_logger(__name__) def _UpperCAmelCase ( __lowerCamelCase : Any , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : str ) -> List[Any]: _snake_case = os.path.abspath(__lowerCamelCase ) logger.info(f'''Converting TensorFlow checkpoint from {tf_path}''' ) # Load weights from TF model _snake_case = tf.train.list_variables(__lowerCamelCase ) _snake_case = [] _snake_case = [] _snake_case = [] for full_name, shape in init_vars: # logger.info(f"Loading TF weight {name} with shape {shape}") _snake_case = full_name.split('''/''' ) if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]: logger.info(f'''Skipping non-model layer {full_name}''' ) continue if "optimizer" in full_name: logger.info(f'''Skipping optimization layer {full_name}''' ) continue if name[0] == "model": # ignore initial 'model' _snake_case = name[1:] # figure out how many levels deep the name is _snake_case = 0 for _name in name: if _name.startswith('''layer_with_weights''' ): depth += 1 else: break layer_depth.append(__lowerCamelCase ) # read data _snake_case = tf.train.load_variable(__lowerCamelCase , __lowerCamelCase ) names.append('''/'''.join(__lowerCamelCase ) ) arrays.append(__lowerCamelCase ) logger.info(f'''Read a total of {len(__lowerCamelCase ):,} layers''' ) # Sanity check if len(set(__lowerCamelCase ) ) != 1: raise ValueError(f'''Found layer names with different depths (layer depth {list(set(__lowerCamelCase ) )})''' ) _snake_case = list(set(__lowerCamelCase ) )[0] if layer_depth != 1: raise ValueError( '''The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP''' ''' heads.''' ) # convert layers logger.info('''Converting weights...''' ) for full_name, array in zip(__lowerCamelCase , __lowerCamelCase ): _snake_case = full_name.split('''/''' ) _snake_case = model _snake_case = [] for i, m_name in enumerate(__lowerCamelCase ): if m_name == ".ATTRIBUTES": # variable names end with .ATTRIBUTES/VARIABLE_VALUE break if m_name.startswith('''layer_with_weights''' ): _snake_case = int(m_name.split('''-''' )[-1] ) if layer_num <= 2: # embedding layers # layer_num 0: word_embeddings # layer_num 1: position_embeddings # layer_num 2: token_type_embeddings continue elif layer_num == 3: # embedding LayerNorm trace.extend(['''embeddings''', '''LayerNorm'''] ) _snake_case = getattr(__lowerCamelCase , '''embeddings''' ) _snake_case = getattr(__lowerCamelCase , '''LayerNorm''' ) elif layer_num > 3 and layer_num < config.num_hidden_layers + 4: # encoder layers trace.extend(['''encoder''', '''layer''', str(layer_num - 4 )] ) _snake_case = getattr(__lowerCamelCase , '''encoder''' ) _snake_case = getattr(__lowerCamelCase , '''layer''' ) _snake_case = pointer[layer_num - 4] elif layer_num == config.num_hidden_layers + 4: # pooler layer trace.extend(['''pooler''', '''dense'''] ) _snake_case = getattr(__lowerCamelCase , '''pooler''' ) _snake_case = getattr(__lowerCamelCase , '''dense''' ) elif m_name == "embeddings": trace.append('''embeddings''' ) _snake_case = getattr(__lowerCamelCase , '''embeddings''' ) if layer_num == 0: trace.append('''word_embeddings''' ) _snake_case = getattr(__lowerCamelCase , '''word_embeddings''' ) elif layer_num == 1: trace.append('''position_embeddings''' ) _snake_case = getattr(__lowerCamelCase , '''position_embeddings''' ) elif layer_num == 2: trace.append('''token_type_embeddings''' ) _snake_case = getattr(__lowerCamelCase , '''token_type_embeddings''' ) else: raise ValueError(f'''Unknown embedding layer with name {full_name}''' ) trace.append('''weight''' ) _snake_case = getattr(__lowerCamelCase , '''weight''' ) elif m_name == "_attention_layer": # self-attention layer trace.extend(['''attention''', '''self'''] ) _snake_case = getattr(__lowerCamelCase , '''attention''' ) _snake_case = getattr(__lowerCamelCase , '''self''' ) elif m_name == "_attention_layer_norm": # output attention norm trace.extend(['''attention''', '''output''', '''LayerNorm'''] ) _snake_case = getattr(__lowerCamelCase , '''attention''' ) _snake_case = getattr(__lowerCamelCase , '''output''' ) _snake_case = getattr(__lowerCamelCase , '''LayerNorm''' ) elif m_name == "_attention_output_dense": # output attention dense trace.extend(['''attention''', '''output''', '''dense'''] ) _snake_case = getattr(__lowerCamelCase , '''attention''' ) _snake_case = getattr(__lowerCamelCase , '''output''' ) _snake_case = getattr(__lowerCamelCase , '''dense''' ) elif m_name == "_output_dense": # output dense trace.extend(['''output''', '''dense'''] ) _snake_case = getattr(__lowerCamelCase , '''output''' ) _snake_case = getattr(__lowerCamelCase , '''dense''' ) elif m_name == "_output_layer_norm": # output dense trace.extend(['''output''', '''LayerNorm'''] ) _snake_case = getattr(__lowerCamelCase , '''output''' ) _snake_case = getattr(__lowerCamelCase , '''LayerNorm''' ) elif m_name == "_key_dense": # attention key trace.append('''key''' ) _snake_case = getattr(__lowerCamelCase , '''key''' ) elif m_name == "_query_dense": # attention query trace.append('''query''' ) _snake_case = getattr(__lowerCamelCase , '''query''' ) elif m_name == "_value_dense": # attention value trace.append('''value''' ) _snake_case = getattr(__lowerCamelCase , '''value''' ) elif m_name == "_intermediate_dense": # attention intermediate dense trace.extend(['''intermediate''', '''dense'''] ) _snake_case = getattr(__lowerCamelCase , '''intermediate''' ) _snake_case = getattr(__lowerCamelCase , '''dense''' ) elif m_name == "_output_layer_norm": # output layer norm trace.append('''output''' ) _snake_case = getattr(__lowerCamelCase , '''output''' ) # weights & biases elif m_name in ["bias", "beta"]: trace.append('''bias''' ) _snake_case = getattr(__lowerCamelCase , '''bias''' ) elif m_name in ["kernel", "gamma"]: trace.append('''weight''' ) _snake_case = getattr(__lowerCamelCase , '''weight''' ) else: logger.warning(f'''Ignored {m_name}''' ) # for certain layers reshape is necessary _snake_case = '''.'''.join(__lowerCamelCase ) if re.match(R'''(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)''' , __lowerCamelCase ) or re.match( R'''(\S+)\.attention\.output\.dense\.weight''' , __lowerCamelCase ): _snake_case = array.reshape(pointer.data.shape ) if "kernel" in full_name: _snake_case = array.transpose() if pointer.shape == array.shape: _snake_case = torch.from_numpy(__lowerCamelCase ) else: raise ValueError( f'''Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:''' f''' {array.shape}''' ) logger.info(f'''Successfully set variable {full_name} to PyTorch layer {trace}''' ) return model def _UpperCAmelCase ( __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[Any] ) -> int: # Instantiate model logger.info(f'''Loading model based on config from {config_path}...''' ) _snake_case = BertConfig.from_json_file(__lowerCamelCase ) _snake_case = BertModel(__lowerCamelCase ) # Load weights from checkpoint logger.info(f'''Loading weights from checkpoint {tf_checkpoint_path}...''' ) load_tfa_weights_in_bert(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) # Save pytorch-model logger.info(f'''Saving PyTorch model to {pytorch_dump_path}...''' ) torch.save(model.state_dict() , __lowerCamelCase ) if __name__ == "__main__": UpperCAmelCase__ = argparse.ArgumentParser() parser.add_argument( '--tf_checkpoint_path', type=str, required=True, help='Path to the TensorFlow 2.x checkpoint path.' ) parser.add_argument( '--bert_config_file', type=str, required=True, help='The config json file corresponding to the BERT model. This specifies the model architecture.', ) parser.add_argument( '--pytorch_dump_path', type=str, required=True, help='Path to the output PyTorch model (must include filename).', ) UpperCAmelCase__ = parser.parse_args() convert_tfa_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
430
0
def _UpperCamelCase (a__ :int ): """simple docstring""" if number < 0: raise ValueError("""number must not be negative""" ) return number & (number - 1) == 0 if __name__ == "__main__": import doctest doctest.testmod()
619
from typing import Tuple, Union from ...modeling_outputs import BackboneOutput from ...modeling_utils import PreTrainedModel from ...utils import is_timm_available, is_torch_available, requires_backends from ...utils.backbone_utils import BackboneMixin from .configuration_timm_backbone import TimmBackboneConfig if is_timm_available(): import timm if is_torch_available(): from torch import Tensor class __SCREAMING_SNAKE_CASE ( _a , _a ): snake_case : int = """pixel_values""" snake_case : List[Any] = False snake_case : str = TimmBackboneConfig def __init__( self , __lowerCAmelCase , **__lowerCAmelCase ): requires_backends(self , """timm""" ) super().__init__(__lowerCAmelCase ) UpperCamelCase__ = config if config.backbone is None: raise ValueError("""backbone is not set in the config. Please set it to a timm model name.""" ) if config.backbone not in timm.list_models(): raise ValueError(f"""backbone {config.backbone} is not supported by timm.""" ) if hasattr(__lowerCAmelCase , """out_features""" ) and config.out_features is not None: raise ValueError("""out_features is not supported by TimmBackbone. Please use out_indices instead.""" ) UpperCamelCase__ = getattr(__lowerCAmelCase , """use_pretrained_backbone""" , __lowerCAmelCase ) if pretrained is None: raise ValueError("""use_pretrained_backbone is not set in the config. Please set it to True or False.""" ) # We just take the final layer by default. This matches the default for the transformers models. UpperCamelCase__ = config.out_indices if getattr(__lowerCAmelCase , """out_indices""" , __lowerCAmelCase ) is not None else (-1,) UpperCamelCase__ = timm.create_model( config.backbone , pretrained=__lowerCAmelCase , features_only=config.features_only , in_chans=config.num_channels , out_indices=__lowerCAmelCase , **__lowerCAmelCase , ) # These are used to control the output of the model when called. If output_hidden_states is True, then # return_layers is modified to include all layers. UpperCamelCase__ = self._backbone.return_layers UpperCamelCase__ = {layer["""module"""]: str(__lowerCAmelCase ) for i, layer in enumerate(self._backbone.feature_info.info )} super()._init_backbone(__lowerCAmelCase ) @classmethod def _lowerCamelCase ( cls , __lowerCAmelCase , *__lowerCAmelCase , **__lowerCAmelCase ): requires_backends(cls , ["""vision""", """timm"""] ) from ...models.timm_backbone import TimmBackboneConfig UpperCamelCase__ = kwargs.pop("""config""" , TimmBackboneConfig() ) UpperCamelCase__ = kwargs.pop("""use_timm_backbone""" , __lowerCAmelCase ) if not use_timm: raise ValueError("""use_timm_backbone must be True for timm backbones""" ) UpperCamelCase__ = kwargs.pop("""num_channels""" , config.num_channels ) UpperCamelCase__ = kwargs.pop("""features_only""" , config.features_only ) UpperCamelCase__ = kwargs.pop("""use_pretrained_backbone""" , config.use_pretrained_backbone ) UpperCamelCase__ = kwargs.pop("""out_indices""" , config.out_indices ) UpperCamelCase__ = TimmBackboneConfig( backbone=__lowerCAmelCase , num_channels=__lowerCAmelCase , features_only=__lowerCAmelCase , use_pretrained_backbone=__lowerCAmelCase , out_indices=__lowerCAmelCase , ) return super()._from_config(__lowerCAmelCase , **__lowerCAmelCase ) def _lowerCamelCase ( self , __lowerCAmelCase ): pass def _lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , **__lowerCAmelCase ): UpperCamelCase__ = return_dict if return_dict is not None else self.config.use_return_dict UpperCamelCase__ = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) UpperCamelCase__ = output_attentions if output_attentions is not None else self.config.output_attentions if output_attentions: raise ValueError("""Cannot output attentions for timm backbones at the moment""" ) if output_hidden_states: # We modify the return layers to include all the stages of the backbone UpperCamelCase__ = self._all_layers UpperCamelCase__ = self._backbone(__lowerCAmelCase , **__lowerCAmelCase ) UpperCamelCase__ = self._return_layers UpperCamelCase__ = tuple(hidden_states[i] for i in self.out_indices ) else: UpperCamelCase__ = self._backbone(__lowerCAmelCase , **__lowerCAmelCase ) UpperCamelCase__ = None UpperCamelCase__ = tuple(__lowerCAmelCase ) UpperCamelCase__ = tuple(__lowerCAmelCase ) if hidden_states is not None else None if not return_dict: UpperCamelCase__ = (feature_maps,) if output_hidden_states: UpperCamelCase__ = output + (hidden_states,) return output return BackboneOutput(feature_maps=__lowerCAmelCase , hidden_states=__lowerCAmelCase , attentions=__lowerCAmelCase )
619
1
from __future__ import annotations from typing import Generic, TypeVar _UpperCAmelCase : Union[str, Any] = TypeVar("T") class lowercase ( Generic[T] ): def __init__( self , A_ ) -> None: """simple docstring""" UpperCamelCase = data UpperCamelCase = self UpperCamelCase = 0 class lowercase ( Generic[T] ): def __init__( self ) -> None: """simple docstring""" # map from node name to the node object UpperCamelCase = {} def __UpperCamelCase ( self , A_ ) -> None: """simple docstring""" # create a new set with x as its member UpperCamelCase = DisjointSetTreeNode(A_ ) def __UpperCamelCase ( self , A_ ) -> DisjointSetTreeNode[T]: """simple docstring""" # find the set x belongs to (with path-compression) UpperCamelCase = self.map[data] if elem_ref != elem_ref.parent: UpperCamelCase = self.find_set(elem_ref.parent.data ) return elem_ref.parent def __UpperCamelCase ( self , A_ , A_ ) -> None: """simple docstring""" # helper function for union operation if nodea.rank > nodea.rank: UpperCamelCase = nodea else: UpperCamelCase = nodea if nodea.rank == nodea.rank: nodea.rank += 1 def __UpperCamelCase ( self , A_ , A_ ) -> None: """simple docstring""" # merge 2 disjoint sets self.link(self.find_set(A_ ) , self.find_set(A_ ) ) class lowercase ( Generic[T] ): def __init__( self ) -> None: """simple docstring""" # connections: map from the node to the neighbouring nodes (with weights) UpperCamelCase = {} def __UpperCamelCase ( self , A_ ) -> None: """simple docstring""" # add a node ONLY if its not present in the graph if node not in self.connections: UpperCamelCase = {} def __UpperCamelCase ( self , A_ , A_ , A_ ) -> None: """simple docstring""" # add an edge with the given weight self.add_node(A_ ) self.add_node(A_ ) UpperCamelCase = weight UpperCamelCase = weight def __UpperCamelCase ( self ) -> GraphUndirectedWeighted[T]: """simple docstring""" UpperCamelCase = [] UpperCamelCase = set() for start in self.connections: for end in self.connections[start]: if (start, end) not in seen: seen.add((end, start) ) edges.append((start, end, self.connections[start][end]) ) edges.sort(key=lambda A_ : x[2] ) # creating the disjoint set UpperCamelCase = DisjointSetTree[T]() for node in self.connections: disjoint_set.make_set(A_ ) # MST generation UpperCamelCase = 0 UpperCamelCase = 0 UpperCamelCase = GraphUndirectedWeighted[T]() while num_edges < len(self.connections ) - 1: UpperCamelCase , UpperCamelCase , UpperCamelCase = edges[index] index += 1 UpperCamelCase = disjoint_set.find_set(A_ ) UpperCamelCase = disjoint_set.find_set(A_ ) if parent_u != parent_v: num_edges += 1 graph.add_edge(A_ , A_ , A_ ) disjoint_set.union(A_ , A_ ) return graph
3
def A ( lowercase , lowercase ) -> str: '''simple docstring''' if a < 0 or b < 0: raise ValueError('the value of both inputs must be positive' ) UpperCamelCase = str(bin(lowercase ) )[2:] # remove the leading "0b" UpperCamelCase = str(bin(lowercase ) )[2:] # remove the leading "0b" UpperCamelCase = max(len(lowercase ) , len(lowercase ) ) return "0b" + "".join( str(int(char_a != char_b ) ) for char_a, char_b in zip(a_binary.zfill(lowercase ) , b_binary.zfill(lowercase ) ) ) if __name__ == "__main__": import doctest doctest.testmod()
3
1
import tempfile import unittest from make_student import create_student_by_copying_alternating_layers from transformers import AutoConfig from transformers.file_utils import cached_property from transformers.testing_utils import require_torch _lowerCamelCase : Tuple = """sshleifer/bart-tiny-random""" _lowerCamelCase : Optional[int] = """patrickvonplaten/t5-tiny-random""" @require_torch class UpperCamelCase_ ( unittest.TestCase ): '''simple docstring''' @cached_property def SCREAMING_SNAKE_CASE ( self : List[Any]) ->str: '''simple docstring''' return AutoConfig.from_pretrained(UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Dict: '''simple docstring''' A__ , *A__ = create_student_by_copying_alternating_layers(UpperCAmelCase__ , tempfile.mkdtemp() , e=1 , d=1) self.assertEqual(student.config.num_hidden_layers , 1) def SCREAMING_SNAKE_CASE ( self : int) ->Any: '''simple docstring''' A__ , *A__ = create_student_by_copying_alternating_layers(UpperCAmelCase__ , tempfile.mkdtemp() , e=1 , d=UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : List[Any]) ->Union[str, Any]: '''simple docstring''' A__ , *A__ = create_student_by_copying_alternating_layers(UpperCAmelCase__ , tempfile.mkdtemp() , e=1 , d=UpperCAmelCase__) self.assertEqual(student.config.encoder_layers , 1) self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers) def SCREAMING_SNAKE_CASE ( self : str) ->Optional[int]: '''simple docstring''' A__ , *A__ = create_student_by_copying_alternating_layers(UpperCAmelCase__ , tempfile.mkdtemp() , e=1 , d=1) self.assertEqual(student.config.encoder_layers , 1) self.assertEqual(student.config.decoder_layers , 1) def SCREAMING_SNAKE_CASE ( self : List[Any]) ->Optional[int]: '''simple docstring''' with self.assertRaises(UpperCAmelCase__): create_student_by_copying_alternating_layers(UpperCAmelCase__ , tempfile.mkdtemp() , e=UpperCAmelCase__ , d=UpperCAmelCase__)
87
"""simple docstring""" import argparse import json from collections import OrderedDict from functools import partial from pathlib import Path import timm import torch from huggingface_hub import hf_hub_download from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor from transformers.utils import logging logging.set_verbosity_info() UpperCamelCase = logging.get_logger() def _lowerCamelCase ( UpperCAmelCase_ : int, UpperCAmelCase_ : str, UpperCAmelCase_ : LevitConfig, UpperCAmelCase_ : Path, UpperCAmelCase_ : bool = True ) -> int: """simple docstring""" print(F"""Converting {name}...""" ) with torch.no_grad(): if hidden_sizes == 128: if name[-1] == "S": A__ = timm.create_model("levit_128s", pretrained=UpperCAmelCase_ ) else: A__ = timm.create_model("levit_128", pretrained=UpperCAmelCase_ ) if hidden_sizes == 192: A__ = timm.create_model("levit_192", pretrained=UpperCAmelCase_ ) if hidden_sizes == 256: A__ = timm.create_model("levit_256", pretrained=UpperCAmelCase_ ) if hidden_sizes == 384: A__ = timm.create_model("levit_384", pretrained=UpperCAmelCase_ ) from_model.eval() A__ = LevitForImageClassificationWithTeacher(UpperCAmelCase_ ).eval() A__ = OrderedDict() A__ = from_model.state_dict() A__ = list(from_model.state_dict().keys() ) A__ = list(our_model.state_dict().keys() ) print(len(UpperCAmelCase_ ), len(UpperCAmelCase_ ) ) for i in range(len(UpperCAmelCase_ ) ): A__ = weights[og_keys[i]] our_model.load_state_dict(UpperCAmelCase_ ) A__ = torch.randn((2, 3, 224, 224) ) A__ = from_model(UpperCAmelCase_ ) A__ = our_model(UpperCAmelCase_ ).logits assert torch.allclose(UpperCAmelCase_, UpperCAmelCase_ ), "The model logits don't match the original one." A__ = name print(UpperCAmelCase_ ) if push_to_hub: our_model.save_pretrained(save_directory / checkpoint_name ) A__ = LevitImageProcessor() image_processor.save_pretrained(save_directory / checkpoint_name ) print(F"""Pushed {checkpoint_name}""" ) def _lowerCamelCase ( UpperCAmelCase_ : Path, UpperCAmelCase_ : str = None, UpperCAmelCase_ : bool = True ) -> Union[str, Any]: """simple docstring""" A__ = "imagenet-1k-id2label.json" A__ = 1000 A__ = (1, num_labels) A__ = "huggingface/label-files" A__ = num_labels A__ = json.load(open(hf_hub_download(UpperCAmelCase_, UpperCAmelCase_, repo_type="dataset" ), "r" ) ) A__ = {int(UpperCAmelCase_ ): v for k, v in idalabel.items()} A__ = idalabel A__ = {v: k for k, v in idalabel.items()} A__ = partial(UpperCAmelCase_, num_labels=UpperCAmelCase_, idalabel=UpperCAmelCase_, labelaid=UpperCAmelCase_ ) A__ = { "levit-128S": 128, "levit-128": 128, "levit-192": 192, "levit-256": 256, "levit-384": 384, } A__ = { "levit-128S": ImageNetPreTrainedConfig( hidden_sizes=[128, 256, 384], num_attention_heads=[4, 6, 8], depths=[2, 3, 4], key_dim=[16, 16, 16], drop_path_rate=0, ), "levit-128": ImageNetPreTrainedConfig( hidden_sizes=[128, 256, 384], num_attention_heads=[4, 8, 12], depths=[4, 4, 4], key_dim=[16, 16, 16], drop_path_rate=0, ), "levit-192": ImageNetPreTrainedConfig( hidden_sizes=[192, 288, 384], num_attention_heads=[3, 5, 6], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0, ), "levit-256": ImageNetPreTrainedConfig( hidden_sizes=[256, 384, 512], num_attention_heads=[4, 6, 8], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0, ), "levit-384": ImageNetPreTrainedConfig( hidden_sizes=[384, 512, 768], num_attention_heads=[6, 9, 12], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0.1, ), } if model_name: convert_weight_and_push( names_to_hidden_sizes[model_name], UpperCAmelCase_, names_to_config[model_name], UpperCAmelCase_, UpperCAmelCase_ ) else: for model_name, config in names_to_config.items(): convert_weight_and_push(names_to_hidden_sizes[model_name], UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_ ) return config, expected_shape if __name__ == "__main__": UpperCamelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default=None, type=str, help="""The name of the model you wish to convert, it must be one of the supported Levit* architecture,""", ) parser.add_argument( """--pytorch_dump_folder_path""", default="""levit-dump-folder/""", type=Path, required=False, help="""Path to the output PyTorch model directory.""", ) parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""") parser.add_argument( """--no-push_to_hub""", dest="""push_to_hub""", action="""store_false""", help="""Do not push model and image processor to the hub""", ) UpperCamelCase = parser.parse_args() UpperCamelCase = args.pytorch_dump_folder_path pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True) convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
104
0
'''simple docstring''' from ...utils import ( OptionalDependencyNotAvailable, is_flax_available, is_torch_available, is_transformers_available, ) try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .multicontrolnet import MultiControlNetModel from .pipeline_controlnet import StableDiffusionControlNetPipeline from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline if is_transformers_available() and is_flax_available(): from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
58
'''simple docstring''' import math import torch from torch import nn from ..configuration_utils import ConfigMixin, register_to_config from .attention_processor import Attention from .embeddings import get_timestep_embedding from .modeling_utils import ModelMixin class SCREAMING_SNAKE_CASE ( __lowercase , __lowercase): """simple docstring""" @register_to_config def __init__( self , __A = 128 , __A = 256 , __A = 2_000.0 , __A = 768 , __A = 12 , __A = 12 , __A = 64 , __A = 2048 , __A = 0.1 , ) -> str: super().__init__() _lowerCAmelCase =nn.Sequential( nn.Linear(__A , d_model * 4 , bias=__A ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=__A ) , nn.SiLU() , ) _lowerCAmelCase =nn.Embedding(__A , __A ) _lowerCAmelCase =False _lowerCAmelCase =nn.Linear(__A , __A , bias=__A ) _lowerCAmelCase =nn.Dropout(p=__A ) _lowerCAmelCase =nn.ModuleList() for lyr_num in range(__A ): # FiLM conditional T5 decoder _lowerCAmelCase =DecoderLayer(d_model=__A , d_kv=__A , num_heads=__A , d_ff=__A , dropout_rate=__A ) self.decoders.append(__A ) _lowerCAmelCase =TaLayerNorm(__A ) _lowerCAmelCase =nn.Dropout(p=__A ) _lowerCAmelCase =nn.Linear(__A , __A , bias=__A ) def UpperCamelCase__ ( self , __A , __A ) -> Any: _lowerCAmelCase =torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) ) return mask.unsqueeze(-3 ) def UpperCamelCase__ ( self , __A , __A , __A ) -> Optional[Any]: _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =decoder_input_tokens.shape assert decoder_noise_time.shape == (batch,) # decoder_noise_time is in [0, 1), so rescale to expected timing range. _lowerCAmelCase =get_timestep_embedding( decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype ) _lowerCAmelCase =self.conditioning_emb(__A ).unsqueeze(1 ) assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4) _lowerCAmelCase =decoder_input_tokens.shape[1] # If we want to use relative positions for audio context, we can just offset # this sequence by the length of encodings_and_masks. _lowerCAmelCase =torch.broadcast_to( torch.arange(__A , device=decoder_input_tokens.device ) , (batch, seq_length) , ) _lowerCAmelCase =self.position_encoding(__A ) _lowerCAmelCase =self.continuous_inputs_projection(__A ) inputs += position_encodings _lowerCAmelCase =self.dropout(__A ) # decoder: No padding present. _lowerCAmelCase =torch.ones( decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype ) # Translate encoding masks to encoder-decoder masks. _lowerCAmelCase =[(x, self.encoder_decoder_mask(__A , __A )) for x, y in encodings_and_masks] # cross attend style: concat encodings _lowerCAmelCase =torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 ) _lowerCAmelCase =torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 ) for lyr in self.decoders: _lowerCAmelCase =lyr( __A , conditioning_emb=__A , encoder_hidden_states=__A , encoder_attention_mask=__A , )[0] _lowerCAmelCase =self.decoder_norm(__A ) _lowerCAmelCase =self.post_dropout(__A ) _lowerCAmelCase =self.spec_out(__A ) return spec_out class SCREAMING_SNAKE_CASE ( nn.Module): """simple docstring""" def __init__( self , __A , __A , __A , __A , __A , __A=1E-6 ) -> Union[str, Any]: super().__init__() _lowerCAmelCase =nn.ModuleList() # cond self attention: layer 0 self.layer.append( TaLayerSelfAttentionCond(d_model=__A , d_kv=__A , num_heads=__A , dropout_rate=__A ) ) # cross attention: layer 1 self.layer.append( TaLayerCrossAttention( d_model=__A , d_kv=__A , num_heads=__A , dropout_rate=__A , layer_norm_epsilon=__A , ) ) # Film Cond MLP + dropout: last layer self.layer.append( TaLayerFFCond(d_model=__A , d_ff=__A , dropout_rate=__A , layer_norm_epsilon=__A ) ) def UpperCamelCase__ ( self , __A , __A=None , __A=None , __A=None , __A=None , __A=None , ) -> Any: _lowerCAmelCase =self.layer[0]( __A , conditioning_emb=__A , attention_mask=__A , ) if encoder_hidden_states is not None: _lowerCAmelCase =torch.where(encoder_attention_mask > 0 , 0 , -1E10 ).to( encoder_hidden_states.dtype ) _lowerCAmelCase =self.layer[1]( __A , key_value_states=__A , attention_mask=__A , ) # Apply Film Conditional Feed Forward layer _lowerCAmelCase =self.layer[-1](__A , __A ) return (hidden_states,) class SCREAMING_SNAKE_CASE ( nn.Module): """simple docstring""" def __init__( self , __A , __A , __A , __A ) -> Optional[Any]: super().__init__() _lowerCAmelCase =TaLayerNorm(__A ) _lowerCAmelCase =TaFiLMLayer(in_features=d_model * 4 , out_features=__A ) _lowerCAmelCase =Attention(query_dim=__A , heads=__A , dim_head=__A , out_bias=__A , scale_qk=__A ) _lowerCAmelCase =nn.Dropout(__A ) def UpperCamelCase__ ( self , __A , __A=None , __A=None , ) -> List[Any]: # pre_self_attention_layer_norm _lowerCAmelCase =self.layer_norm(__A ) if conditioning_emb is not None: _lowerCAmelCase =self.FiLMLayer(__A , __A ) # Self-attention block _lowerCAmelCase =self.attention(__A ) _lowerCAmelCase =hidden_states + self.dropout(__A ) return hidden_states class SCREAMING_SNAKE_CASE ( nn.Module): """simple docstring""" def __init__( self , __A , __A , __A , __A , __A ) -> Optional[int]: super().__init__() _lowerCAmelCase =Attention(query_dim=__A , heads=__A , dim_head=__A , out_bias=__A , scale_qk=__A ) _lowerCAmelCase =TaLayerNorm(__A , eps=__A ) _lowerCAmelCase =nn.Dropout(__A ) def UpperCamelCase__ ( self , __A , __A=None , __A=None , ) -> Tuple: _lowerCAmelCase =self.layer_norm(__A ) _lowerCAmelCase =self.attention( __A , encoder_hidden_states=__A , attention_mask=attention_mask.squeeze(1 ) , ) _lowerCAmelCase =hidden_states + self.dropout(__A ) return layer_output class SCREAMING_SNAKE_CASE ( nn.Module): """simple docstring""" def __init__( self , __A , __A , __A , __A ) -> Optional[Any]: super().__init__() _lowerCAmelCase =TaDenseGatedActDense(d_model=__A , d_ff=__A , dropout_rate=__A ) _lowerCAmelCase =TaFiLMLayer(in_features=d_model * 4 , out_features=__A ) _lowerCAmelCase =TaLayerNorm(__A , eps=__A ) _lowerCAmelCase =nn.Dropout(__A ) def UpperCamelCase__ ( self , __A , __A=None ) -> List[Any]: _lowerCAmelCase =self.layer_norm(__A ) if conditioning_emb is not None: _lowerCAmelCase =self.film(__A , __A ) _lowerCAmelCase =self.DenseReluDense(__A ) _lowerCAmelCase =hidden_states + self.dropout(__A ) return hidden_states class SCREAMING_SNAKE_CASE ( nn.Module): """simple docstring""" def __init__( self , __A , __A , __A ) -> Union[str, Any]: super().__init__() _lowerCAmelCase =nn.Linear(__A , __A , bias=__A ) _lowerCAmelCase =nn.Linear(__A , __A , bias=__A ) _lowerCAmelCase =nn.Linear(__A , __A , bias=__A ) _lowerCAmelCase =nn.Dropout(__A ) _lowerCAmelCase =NewGELUActivation() def UpperCamelCase__ ( self , __A ) -> List[Any]: _lowerCAmelCase =self.act(self.wi_a(__A ) ) _lowerCAmelCase =self.wi_a(__A ) _lowerCAmelCase =hidden_gelu * hidden_linear _lowerCAmelCase =self.dropout(__A ) _lowerCAmelCase =self.wo(__A ) return hidden_states class SCREAMING_SNAKE_CASE ( nn.Module): """simple docstring""" def __init__( self , __A , __A=1E-6 ) -> int: super().__init__() _lowerCAmelCase =nn.Parameter(torch.ones(__A ) ) _lowerCAmelCase =eps def UpperCamelCase__ ( self , __A ) -> Dict: # T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean # Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated # w/o mean and there is no bias. Additionally we want to make sure that the accumulation for # half-precision inputs is done in fp32 _lowerCAmelCase =hidden_states.to(torch.floataa ).pow(2 ).mean(-1 , keepdim=__A ) _lowerCAmelCase =hidden_states * torch.rsqrt(variance + self.variance_epsilon ) # convert into half-precision if necessary if self.weight.dtype in [torch.floataa, torch.bfloataa]: _lowerCAmelCase =hidden_states.to(self.weight.dtype ) return self.weight * hidden_states class SCREAMING_SNAKE_CASE ( nn.Module): """simple docstring""" def UpperCamelCase__ ( self , __A ) -> torch.Tensor: return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.044_715 * torch.pow(__A , 3.0 )) )) class SCREAMING_SNAKE_CASE ( nn.Module): """simple docstring""" def __init__( self , __A , __A ) -> Optional[Any]: super().__init__() _lowerCAmelCase =nn.Linear(__A , out_features * 2 , bias=__A ) def UpperCamelCase__ ( self , __A , __A ) -> Optional[Any]: _lowerCAmelCase =self.scale_bias(__A ) _lowerCAmelCase , _lowerCAmelCase =torch.chunk(__A , 2 , -1 ) _lowerCAmelCase =x * (1 + scale) + shift return x
58
1
from typing import Any def a ( a , a , a , a , a , ) ->list: '''simple docstring''' _validation( a__ , a__ , a__ , a__ , a__ , ) # Creates data structures and fill initial step SCREAMING_SNAKE_CASE = {} SCREAMING_SNAKE_CASE = {} for state in states_space: SCREAMING_SNAKE_CASE = observations_space[0] SCREAMING_SNAKE_CASE = ( initial_probabilities[state] * emission_probabilities[state][observation] ) SCREAMING_SNAKE_CASE = None # Fills the data structure with the probabilities of # different transitions and pointers to previous states for o in range(1 , len(a__ ) ): SCREAMING_SNAKE_CASE = observations_space[o] SCREAMING_SNAKE_CASE = observations_space[o - 1] for state in states_space: # Calculates the argmax for probability function SCREAMING_SNAKE_CASE = '''''' SCREAMING_SNAKE_CASE = -1 for k_state in states_space: SCREAMING_SNAKE_CASE = ( probabilities[(k_state, prior_observation)] * transition_probabilities[k_state][state] * emission_probabilities[state][observation] ) if probability > max_probability: SCREAMING_SNAKE_CASE = probability SCREAMING_SNAKE_CASE = k_state # Update probabilities and pointers dicts SCREAMING_SNAKE_CASE = ( probabilities[(arg_max, prior_observation)] * transition_probabilities[arg_max][state] * emission_probabilities[state][observation] ) SCREAMING_SNAKE_CASE = arg_max # The final observation SCREAMING_SNAKE_CASE = observations_space[len(a__ ) - 1] # argmax for given final observation SCREAMING_SNAKE_CASE = '''''' SCREAMING_SNAKE_CASE = -1 for k_state in states_space: SCREAMING_SNAKE_CASE = probabilities[(k_state, final_observation)] if probability > max_probability: SCREAMING_SNAKE_CASE = probability SCREAMING_SNAKE_CASE = k_state SCREAMING_SNAKE_CASE = arg_max # Process pointers backwards SCREAMING_SNAKE_CASE = last_state SCREAMING_SNAKE_CASE = [] for o in range(len(a__ ) - 1 , -1 , -1 ): result.append(a__ ) SCREAMING_SNAKE_CASE = pointers[previous, observations_space[o]] result.reverse() return result def a ( a , a , a , a , a , ) ->None: '''simple docstring''' _validate_not_empty( a__ , a__ , a__ , a__ , a__ , ) _validate_lists(a__ , a__ ) _validate_dicts( a__ , a__ , a__ ) def a ( a , a , a , a , a , ) ->None: '''simple docstring''' if not all( [ observations_space, states_space, initial_probabilities, transition_probabilities, emission_probabilities, ] ): raise ValueError('''There\'s an empty parameter''' ) def a ( a , a ) ->None: '''simple docstring''' _validate_list(a__ , '''observations_space''' ) _validate_list(a__ , '''states_space''' ) def a ( a , a ) ->None: '''simple docstring''' if not isinstance(_object , a__ ): SCREAMING_SNAKE_CASE = F"""{var_name} must be a list""" raise ValueError(a__ ) else: for x in _object: if not isinstance(a__ , a__ ): SCREAMING_SNAKE_CASE = F"""{var_name} must be a list of strings""" raise ValueError(a__ ) def a ( a , a , a , ) ->None: '''simple docstring''' _validate_dict(a__ , '''initial_probabilities''' , a__ ) _validate_nested_dict(a__ , '''transition_probabilities''' ) _validate_nested_dict(a__ , '''emission_probabilities''' ) def a ( a , a ) ->None: '''simple docstring''' _validate_dict(_object , a__ , a__ ) for x in _object.values(): _validate_dict(a__ , a__ , a__ , a__ ) def a ( a , a , a , a = False ) ->None: '''simple docstring''' if not isinstance(_object , a__ ): SCREAMING_SNAKE_CASE = F"""{var_name} must be a dict""" raise ValueError(a__ ) if not all(isinstance(a__ , a__ ) for x in _object ): SCREAMING_SNAKE_CASE = F"""{var_name} all keys must be strings""" raise ValueError(a__ ) if not all(isinstance(a__ , a__ ) for x in _object.values() ): SCREAMING_SNAKE_CASE = '''nested dictionary ''' if nested else '''''' SCREAMING_SNAKE_CASE = F"""{var_name} {nested_text}all values must be {value_type.__name__}""" raise ValueError(a__ ) if __name__ == "__main__": from doctest import testmod testmod()
201
import argparse import json from dataclasses import dataclass, field from functools import partial from pathlib import Path from typing import List import timm import torch import torch.nn as nn from huggingface_hub import hf_hub_download from torch import Tensor from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification from transformers.utils import logging logging.set_verbosity_info() A : str = logging.get_logger() @dataclass class __A: snake_case_ = 42 snake_case_ = field(default_factory=a ) snake_case_ = field(default_factory=a ) def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case ) -> Optional[int]: '''simple docstring''' __a = len(list(m.modules() ) ) == 1 or isinstance(_snake_case , nn.Convad ) or isinstance(_snake_case , nn.BatchNormad ) if has_not_submodules: self.traced.append(_snake_case ) def __call__( self , _snake_case ) -> Any: '''simple docstring''' for m in self.module.modules(): self.handles.append(m.register_forward_hook(self._forward_hook ) ) self.module(_snake_case ) [x.remove() for x in self.handles] return self @property def SCREAMING_SNAKE_CASE_ ( self ) -> Any: '''simple docstring''' return list(filter(lambda _snake_case : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) ) @dataclass class __A: snake_case_ = 42 snake_case_ = 42 snake_case_ = 0 snake_case_ = field(default_factory=a ) snake_case_ = field(default_factory=a ) def __call__( self , _snake_case ) -> Dict: '''simple docstring''' __a = Tracker(self.dest )(_snake_case ).parametrized __a = Tracker(self.src )(_snake_case ).parametrized __a = list(filter(lambda _snake_case : type(_snake_case ) not in self.src_skip , _snake_case ) ) __a = list(filter(lambda _snake_case : type(_snake_case ) not in self.dest_skip , _snake_case ) ) if len(_snake_case ) != len(_snake_case ): raise Exception( F"""Numbers of operations are different. Source module has {len(_snake_case )} operations while""" F""" destination module has {len(_snake_case )}.""" ) for dest_m, src_m in zip(_snake_case , _snake_case ): dest_m.load_state_dict(src_m.state_dict() ) if self.verbose == 1: print(F"""Transfered from={src_m} to={dest_m}""" ) def __lowerCAmelCase ( a__ , a__ , a__ , a__ = True ) -> str: print(F"""Converting {name}...""" ) with torch.no_grad(): __a = timm.create_model(a__ , pretrained=a__ ).eval() __a = ResNetForImageClassification(a__ ).eval() __a = ModuleTransfer(src=a__ , dest=a__ ) __a = torch.randn((1, 3, 224, 224) ) module_transfer(a__ ) assert torch.allclose(from_model(a__ ) , our_model(a__ ).logits ), "The model logits don't match the original one." __a = F"""resnet{'-'.join(name.split('resnet' ) )}""" print(a__ ) if push_to_hub: our_model.push_to_hub( repo_path_or_name=save_directory / checkpoint_name , commit_message='''Add model''' , use_temp_dir=a__ , ) # we can use the convnext one __a = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''' ) image_processor.push_to_hub( repo_path_or_name=save_directory / checkpoint_name , commit_message='''Add image processor''' , use_temp_dir=a__ , ) print(F"""Pushed {checkpoint_name}""" ) def __lowerCAmelCase ( a__ , a__ = None , a__ = True ) -> List[Any]: __a = '''imagenet-1k-id2label.json''' __a = 1000 __a = (1, num_labels) __a = '''huggingface/label-files''' __a = num_labels __a = json.load(open(hf_hub_download(a__ , a__ , repo_type='''dataset''' ) , '''r''' ) ) __a = {int(a__ ): v for k, v in idalabel.items()} __a = idalabel __a = {v: k for k, v in idalabel.items()} __a = partial(a__ , num_labels=a__ , idalabel=a__ , labelaid=a__ ) __a = { '''resnet18''': ImageNetPreTrainedConfig( depths=[2, 2, 2, 2] , hidden_sizes=[64, 128, 256, 512] , layer_type='''basic''' ), '''resnet26''': ImageNetPreTrainedConfig( depths=[2, 2, 2, 2] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck''' ), '''resnet34''': ImageNetPreTrainedConfig( depths=[3, 4, 6, 3] , hidden_sizes=[64, 128, 256, 512] , layer_type='''basic''' ), '''resnet50''': ImageNetPreTrainedConfig( depths=[3, 4, 6, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck''' ), '''resnet101''': ImageNetPreTrainedConfig( depths=[3, 4, 23, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck''' ), '''resnet152''': ImageNetPreTrainedConfig( depths=[3, 8, 36, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck''' ), } if model_name: convert_weight_and_push(a__ , names_to_config[model_name] , a__ , a__ ) else: for model_name, config in names_to_config.items(): convert_weight_and_push(a__ , a__ , a__ , a__ ) return config, expected_shape if __name__ == "__main__": A : Dict = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default=None, type=str, help=( 'The name of the model you wish to convert, it must be one of the supported resnet* architecture,' ' currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted.' ), ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=Path, required=True, help='Path to the output PyTorch model directory.', ) parser.add_argument( '--push_to_hub', default=True, type=bool, required=False, help='If True, push model and image processor to the hub.', ) A : List[Any] = parser.parse_args() A : Path = args.pytorch_dump_folder_path pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True) convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
219
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available _A : Optional[Any] = { '''configuration_mvp''': ['''MVP_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MvpConfig''', '''MvpOnnxConfig'''], '''tokenization_mvp''': ['''MvpTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _A : Any = ['''MvpTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _A : Dict = [ '''MVP_PRETRAINED_MODEL_ARCHIVE_LIST''', '''MvpForCausalLM''', '''MvpForConditionalGeneration''', '''MvpForQuestionAnswering''', '''MvpForSequenceClassification''', '''MvpModel''', '''MvpPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig from .tokenization_mvp import MvpTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mvp_fast import MvpTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mvp import ( MVP_PRETRAINED_MODEL_ARCHIVE_LIST, MvpForCausalLM, MvpForConditionalGeneration, MvpForQuestionAnswering, MvpForSequenceClassification, MvpModel, MvpPreTrainedModel, ) else: import sys _A : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
700
'''simple docstring''' class _lowercase : '''simple docstring''' def __init__( self : Any , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[str]=None , SCREAMING_SNAKE_CASE__ : List[Any]=None ) -> Dict: __lowerCAmelCase = data __lowerCAmelCase = previous __lowerCAmelCase = next_node def __str__( self : Optional[Any] ) -> str: return f"""{self.data}""" def a ( self : Optional[Any] ) -> int: return self.data def a ( self : List[str] ) -> List[str]: return self.next def a ( self : List[Any] ) -> Union[str, Any]: return self.previous class _lowercase : '''simple docstring''' def __init__( self : str , SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> List[str]: __lowerCAmelCase = head def __iter__( self : Tuple ) -> Union[str, Any]: return self def a ( self : Optional[Any] ) -> Tuple: if not self.current: raise StopIteration else: __lowerCAmelCase = self.current.get_data() __lowerCAmelCase = self.current.get_next() return value class _lowercase : '''simple docstring''' def __init__( self : Dict ) -> int: __lowerCAmelCase = None # First node in list __lowerCAmelCase = None # Last node in list def __str__( self : List[str] ) -> List[str]: __lowerCAmelCase = self.head __lowerCAmelCase = [] while current is not None: nodes.append(current.get_data() ) __lowerCAmelCase = current.get_next() return " ".join(str(SCREAMING_SNAKE_CASE__ ) for node in nodes ) def __contains__( self : List[str] , SCREAMING_SNAKE_CASE__ : int ) -> List[Any]: __lowerCAmelCase = self.head while current: if current.get_data() == value: return True __lowerCAmelCase = current.get_next() return False def __iter__( self : List[Any] ) -> int: return LinkedListIterator(self.head ) def a ( self : List[Any] ) -> List[str]: if self.head: return self.head.get_data() return None def a ( self : Dict ) -> List[Any]: if self.tail: return self.tail.get_data() return None def a ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Node ) -> None: if self.head is None: __lowerCAmelCase = node __lowerCAmelCase = node else: self.insert_before_node(self.head , SCREAMING_SNAKE_CASE__ ) def a ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Node ) -> None: if self.head is None: self.set_head(SCREAMING_SNAKE_CASE__ ) else: self.insert_after_node(self.tail , SCREAMING_SNAKE_CASE__ ) def a ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : int ) -> None: __lowerCAmelCase = Node(SCREAMING_SNAKE_CASE__ ) if self.head is None: self.set_head(SCREAMING_SNAKE_CASE__ ) else: self.set_tail(SCREAMING_SNAKE_CASE__ ) def a ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Node , SCREAMING_SNAKE_CASE__ : Node ) -> None: __lowerCAmelCase = node __lowerCAmelCase = node.previous if node.get_previous() is None: __lowerCAmelCase = node_to_insert else: __lowerCAmelCase = node_to_insert __lowerCAmelCase = node_to_insert def a ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Node , SCREAMING_SNAKE_CASE__ : Node ) -> None: __lowerCAmelCase = node __lowerCAmelCase = node.next if node.get_next() is None: __lowerCAmelCase = node_to_insert else: __lowerCAmelCase = node_to_insert __lowerCAmelCase = node_to_insert def a ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ) -> None: __lowerCAmelCase = 1 __lowerCAmelCase = Node(SCREAMING_SNAKE_CASE__ ) __lowerCAmelCase = self.head while node: if current_position == position: self.insert_before_node(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) return current_position += 1 __lowerCAmelCase = node.next self.insert_after_node(self.tail , SCREAMING_SNAKE_CASE__ ) def a ( self : Dict , SCREAMING_SNAKE_CASE__ : int ) -> Node: __lowerCAmelCase = self.head while node: if node.get_data() == item: return node __lowerCAmelCase = node.get_next() raise Exception("""Node not found""" ) def a ( self : List[str] , SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Optional[Any]: if (node := self.get_node(SCREAMING_SNAKE_CASE__ )) is not None: if node == self.head: __lowerCAmelCase = self.head.get_next() if node == self.tail: __lowerCAmelCase = self.tail.get_previous() self.remove_node_pointers(SCREAMING_SNAKE_CASE__ ) @staticmethod def a ( SCREAMING_SNAKE_CASE__ : Node ) -> None: if node.get_next(): __lowerCAmelCase = node.previous if node.get_previous(): __lowerCAmelCase = node.next __lowerCAmelCase = None __lowerCAmelCase = None def a ( self : Optional[int] ) -> Any: return self.head is None def UpperCamelCase_ ( ) -> None: '''simple docstring''' if __name__ == "__main__": import doctest doctest.testmod()
330
0
'''simple docstring''' import argparse import os import torch from transformers import FlavaImageCodebook, FlavaImageCodebookConfig def _lowercase ( UpperCamelCase__ : Union[str, Any], UpperCamelCase__ : Union[str, Any], UpperCamelCase__ : Optional[Any], UpperCamelCase__ : Optional[Any] ): __A : Tuple = s.rsplit(UpperCamelCase__, UpperCamelCase__ ) return new.join(UpperCamelCase__ ) def _lowercase ( UpperCamelCase__ : Union[str, Any] ): # encoder.embeddings are double copied in original FLAVA return sum(param.float().sum() if 'encoder.embeddings' not in key else 0 for key, param in state_dict.items() ) def _lowercase ( UpperCamelCase__ : Optional[Any] ): __A : List[str] = {} __A : List[Any] = ['group_1', 'group_2', 'group_3', 'group_4'] for key, value in state_dict.items(): for group_key in group_keys: if group_key in key: __A : str = key.replace(f"""{group_key}.""", f"""{group_key}.group.""" ) if "res_path" in key: __A : str = key.replace('res_path.', 'res_path.path.' ) if key.endswith('.w' ): __A : Optional[Any] = rreplace(UpperCamelCase__, '.w', '.weight', 1 ) if key.endswith('.b' ): __A : Union[str, Any] = rreplace(UpperCamelCase__, '.b', '.bias', 1 ) __A : str = value.float() return upgrade @torch.no_grad() def _lowercase ( UpperCamelCase__ : Optional[int], UpperCamelCase__ : str, UpperCamelCase__ : str=None, UpperCamelCase__ : List[Any]=True ): from dall_e import Encoder __A : Optional[Any] = Encoder() if os.path.exists(UpperCamelCase__ ): __A : int = torch.load(UpperCamelCase__ ) else: __A : List[Any] = torch.hub.load_state_dict_from_url(UpperCamelCase__ ) if isinstance(UpperCamelCase__, UpperCamelCase__ ): __A : Optional[Any] = ckpt.state_dict() encoder.load_state_dict(UpperCamelCase__ ) if config_path is not None: __A : List[str] = FlavaImageCodebookConfig.from_pretrained(UpperCamelCase__ ) else: __A : str = FlavaImageCodebookConfig() __A : Any = FlavaImageCodebook(UpperCamelCase__ ).eval() __A : str = encoder.state_dict() __A : Any = upgrade_state_dict(UpperCamelCase__ ) hf_model.load_state_dict(UpperCamelCase__ ) __A : Optional[Any] = hf_model.state_dict() __A : str = count_parameters(UpperCamelCase__ ) __A : Tuple = count_parameters(UpperCamelCase__ ) assert torch.allclose(UpperCamelCase__, UpperCamelCase__, atol=1E-3 ) if save_checkpoint: hf_model.save_pretrained(UpperCamelCase__ ) else: return hf_state_dict if __name__ == "__main__": UpperCAmelCase_ : str = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to flava checkpoint') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') UpperCAmelCase_ : Optional[int] = parser.parse_args() convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
365
'''simple docstring''' import numpy as np from cva import destroyAllWindows, imread, imshow, waitKey class _lowerCamelCase : '''simple docstring''' def __init__( self , __lowercase , __lowercase , __lowercase ): """simple docstring""" if dst_width < 0 or dst_height < 0: raise ValueError('Destination width/height should be > 0' ) __A : Dict = img __A : Dict = img.shape[1] __A : Tuple = img.shape[0] __A : List[str] = dst_width __A : Tuple = dst_height __A : Optional[Any] = self.src_w / self.dst_w __A : List[str] = self.src_h / self.dst_h __A : Dict = ( np.ones((self.dst_h, self.dst_w, 3) , np.uinta ) * 255 ) def snake_case__ ( self ): """simple docstring""" for i in range(self.dst_h ): for j in range(self.dst_w ): __A : List[str] = self.img[self.get_y(__lowercase )][self.get_x(__lowercase )] def snake_case__ ( self , __lowercase ): """simple docstring""" return int(self.ratio_x * x ) def snake_case__ ( self , __lowercase ): """simple docstring""" return int(self.ratio_y * y ) if __name__ == "__main__": UpperCAmelCase_ , UpperCAmelCase_ : str = 8_0_0, 6_0_0 UpperCAmelCase_ : str = imread('image_data/lena.jpg', 1) UpperCAmelCase_ : List[str] = NearestNeighbour(im, dst_w, dst_h) n.process() imshow( f'''Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}''', n.output ) waitKey(0) destroyAllWindows()
365
1
from __future__ import annotations class __UpperCamelCase : """simple docstring""" def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[Any]: a__ , a__ = text, pattern a__ , a__ = len(SCREAMING_SNAKE_CASE ), len(SCREAMING_SNAKE_CASE ) def _UpperCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> int: for i in range(self.patLen - 1 , -1 , -1 ): if char == self.pattern[i]: return i return -1 def _UpperCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> int: for i in range(self.patLen - 1 , -1 , -1 ): if self.pattern[i] != self.text[current_pos + i]: return current_pos + i return -1 def _UpperCAmelCase ( self ) -> list[int]: # searches pattern in text and returns index positions a__ = [] for i in range(self.textLen - self.patLen + 1 ): a__ = self.mismatch_in_text(SCREAMING_SNAKE_CASE ) if mismatch_index == -1: positions.append(SCREAMING_SNAKE_CASE ) else: a__ = self.match_in_pattern(self.text[mismatch_index] ) a__ = ( mismatch_index - match_index ) # shifting index lgtm [py/multiple-definition] return positions a_ : str = 'ABAABA' a_ : Optional[Any] = 'AB' a_ : int = BoyerMooreSearch(text, pattern) a_ : Dict = bms.bad_character_heuristic() if len(positions) == 0: print('No match found') else: print('Pattern found in following positions: ') print(positions)
148
import unittest from transformers import SPIECE_UNDERLINE from transformers.models.speechta import SpeechTaTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.tokenization_utils import AddedToken from ...test_tokenization_common import TokenizerTesterMixin a_ : List[Any] = get_tests_dir('fixtures/test_sentencepiece_bpe_char.model') @require_sentencepiece @require_tokenizers class __UpperCamelCase ( _lowercase , unittest.TestCase ): """simple docstring""" _lowercase : Dict = SpeechTaTokenizer _lowercase : Optional[int] = False _lowercase : List[Any] = True def _UpperCAmelCase ( self ) -> str: super().setUp() # We have a SentencePiece fixture for testing a__ = SpeechTaTokenizer(SCREAMING_SNAKE_CASE ) a__ = AddedToken('''<mask>''' , lstrip=SCREAMING_SNAKE_CASE , rstrip=SCREAMING_SNAKE_CASE ) a__ = mask_token tokenizer.add_special_tokens({'''mask_token''': mask_token} ) tokenizer.add_tokens(['''<ctc_blank>'''] ) tokenizer.save_pretrained(self.tmpdirname ) def _UpperCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Union[str, Any]: a__ = '''this is a test''' a__ = '''this is a test''' return input_text, output_text def _UpperCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=2_0 , SCREAMING_SNAKE_CASE=5 ) -> Optional[Any]: a__ , a__ = self.get_input_output_texts(SCREAMING_SNAKE_CASE ) a__ = tokenizer.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE ) a__ = tokenizer.decode(SCREAMING_SNAKE_CASE , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE ) return text, ids def _UpperCAmelCase ( self ) -> Tuple: a__ = '''<pad>''' a__ = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) def _UpperCAmelCase ( self ) -> str: a__ = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<s>''' ) self.assertEqual(vocab_keys[1] , '''<pad>''' ) self.assertEqual(vocab_keys[-4] , '''œ''' ) self.assertEqual(vocab_keys[-2] , '''<mask>''' ) self.assertEqual(vocab_keys[-1] , '''<ctc_blank>''' ) self.assertEqual(len(SCREAMING_SNAKE_CASE ) , 8_1 ) def _UpperCAmelCase ( self ) -> List[str]: self.assertEqual(self.get_tokenizer().vocab_size , 7_9 ) def _UpperCAmelCase ( self ) -> str: a__ = self.get_tokenizers(do_lower_case=SCREAMING_SNAKE_CASE ) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}" ): a__ = tokenizer.vocab_size a__ = len(SCREAMING_SNAKE_CASE ) self.assertNotEqual(SCREAMING_SNAKE_CASE , 0 ) # We usually have added tokens from the start in tests because our vocab fixtures are # smaller than the original vocabs - let's not assert this # self.assertEqual(vocab_size, all_size) a__ = ['''aaaaa bbbbbb''', '''cccccccccdddddddd'''] a__ = tokenizer.add_tokens(SCREAMING_SNAKE_CASE ) a__ = tokenizer.vocab_size a__ = len(SCREAMING_SNAKE_CASE ) self.assertNotEqual(SCREAMING_SNAKE_CASE , 0 ) self.assertEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) self.assertEqual(SCREAMING_SNAKE_CASE , len(SCREAMING_SNAKE_CASE ) ) self.assertEqual(SCREAMING_SNAKE_CASE , all_size + len(SCREAMING_SNAKE_CASE ) ) a__ = tokenizer.encode('''aaaaa bbbbbb low cccccccccdddddddd l''' , add_special_tokens=SCREAMING_SNAKE_CASE ) self.assertGreaterEqual(len(SCREAMING_SNAKE_CASE ) , 4 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) a__ = {'''eos_token''': '''>>>>|||<||<<|<<''', '''pad_token''': '''<<<<<|||>|>>>>|>'''} a__ = tokenizer.add_special_tokens(SCREAMING_SNAKE_CASE ) a__ = tokenizer.vocab_size a__ = len(SCREAMING_SNAKE_CASE ) self.assertNotEqual(SCREAMING_SNAKE_CASE , 0 ) self.assertEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) self.assertEqual(SCREAMING_SNAKE_CASE , len(SCREAMING_SNAKE_CASE ) ) self.assertEqual(SCREAMING_SNAKE_CASE , all_size_a + len(SCREAMING_SNAKE_CASE ) ) a__ = tokenizer.encode( '''>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l''' , add_special_tokens=SCREAMING_SNAKE_CASE ) self.assertGreaterEqual(len(SCREAMING_SNAKE_CASE ) , 6 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[0] , tokens[1] ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] , tokens[-4] ) self.assertEqual(tokens[0] , tokenizer.eos_token_id ) self.assertEqual(tokens[-3] , tokenizer.pad_token_id ) def _UpperCAmelCase ( self ) -> Dict: pass def _UpperCAmelCase ( self ) -> Optional[int]: pass def _UpperCAmelCase ( self ) -> Optional[int]: a__ = self.get_tokenizer() a__ = tokenizer.tokenize('''This is a test''' ) # fmt: off self.assertListEqual(SCREAMING_SNAKE_CASE , [SPIECE_UNDERLINE, '''T''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''a''', SPIECE_UNDERLINE, '''t''', '''e''', '''s''', '''t'''] ) # fmt: on self.assertListEqual( tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE ) , [4, 3_2, 1_1, 1_0, 1_2, 4, 1_0, 1_2, 4, 7, 4, 6, 5, 1_2, 6] , ) a__ = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( SCREAMING_SNAKE_CASE , [SPIECE_UNDERLINE, '''I''', SPIECE_UNDERLINE, '''w''', '''a''', '''s''', SPIECE_UNDERLINE, '''b''', '''o''', '''r''', '''n''', SPIECE_UNDERLINE, '''i''', '''n''', SPIECE_UNDERLINE, '''92000''', ''',''', SPIECE_UNDERLINE, '''a''', '''n''', '''d''', SPIECE_UNDERLINE, '''t''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''f''', '''a''', '''l''', '''s''', '''é''', '''.'''] ) a__ = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE ) # fmt: off self.assertListEqual(SCREAMING_SNAKE_CASE , [4, 3_0, 4, 2_0, 7, 1_2, 4, 2_5, 8, 1_3, 9, 4, 1_0, 9, 4, 3, 2_3, 4, 7, 9, 1_4, 4, 6, 1_1, 1_0, 1_2, 4, 1_0, 1_2, 4, 1_9, 7, 1_5, 1_2, 7_3, 2_6] ) # fmt: on a__ = tokenizer.convert_ids_to_tokens(SCREAMING_SNAKE_CASE ) self.assertListEqual( SCREAMING_SNAKE_CASE , [SPIECE_UNDERLINE, '''I''', SPIECE_UNDERLINE, '''w''', '''a''', '''s''', SPIECE_UNDERLINE, '''b''', '''o''', '''r''', '''n''', SPIECE_UNDERLINE, '''i''', '''n''', SPIECE_UNDERLINE, '''<unk>''', ''',''', SPIECE_UNDERLINE, '''a''', '''n''', '''d''', SPIECE_UNDERLINE, '''t''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''f''', '''a''', '''l''', '''s''', '''é''', '''.'''] ) @slow def _UpperCAmelCase ( self ) -> List[str]: # Use custom sequence because this tokenizer does not handle numbers. a__ = [ '''Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides ''' '''general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural ''' '''Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained ''' '''models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.''', '''BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly ''' '''conditioning on both left and right context in all layers.''', '''The quick brown fox jumps over the lazy dog.''', ] # fmt: off a__ = { '''input_ids''': [ [4, 3_2, 1_3, 7, 9, 1_2, 1_9, 8, 1_3, 1_8, 5, 1_3, 1_2, 4, 6_4, 1_9, 8, 1_3, 1_8, 5, 1_3, 1_5, 2_2, 4, 2_8, 9, 8, 2_0, 9, 4, 7, 1_2, 4, 2_4, 2_2, 6, 8, 1_3, 1_7, 1_1, 3_9, 6, 1_3, 7, 9, 1_2, 1_9, 8, 1_3, 1_8, 5, 1_3, 1_2, 4, 7, 9, 1_4, 4, 2_4, 2_2, 6, 8, 1_3, 1_7, 1_1, 3_9, 2_4, 1_3, 5, 6, 1_3, 7, 1_0, 9, 5, 1_4, 3_9, 2_5, 5, 1_3, 6, 6_3, 4, 2_4, 1_3, 8, 2_7, 1_0, 1_4, 5, 1_2, 4, 2_1, 5, 9, 5, 1_3, 7, 1_5, 3_9, 2_4, 1_6, 1_3, 2_4, 8, 1_2, 5, 4, 7, 1_3, 1_7, 1_1, 1_0, 6, 5, 1_7, 6, 1_6, 1_3, 5, 1_2, 4, 6_4, 4_0, 4_7, 5_4, 3_2, 2_3, 4, 5_3, 4_9, 3_2, 2_3, 4, 5_4, 8, 4_0, 4_7, 5_4, 3_2, 7, 2_3, 4, 6_9, 5_2, 4_3, 2_3, 4, 5_1, 1_0, 1_2, 6, 1_0, 1_5, 4_0, 5, 1_3, 6, 2_3, 4, 6_9, 5_2, 4_8, 5, 6, 2_6, 2_6, 2_6, 6_3, 4, 1_9, 8, 1_3, 4, 4_8, 7, 6, 1_6, 1_3, 7, 1_5, 4, 5_2, 7, 9, 2_1, 1_6, 7, 2_1, 5, 4, 6_1, 9, 1_4, 5, 1_3, 1_2, 6, 7, 9, 1_4, 1_0, 9, 2_1, 4, 6_4, 4_8, 5_2, 6_1, 6_3, 4, 7, 9, 1_4, 4, 4_8, 7, 6, 1_6, 1_3, 7, 1_5, 4, 5_2, 7, 9, 2_1, 1_6, 7, 2_1, 5, 4, 5_3, 5, 9, 5, 1_3, 7, 6, 1_0, 8, 9, 4, 6_4, 4_8, 5_2, 5_3, 6_3, 4, 2_0, 1_0, 6, 1_1, 4, 8, 2_7, 5, 1_3, 4, 6, 1_1, 1_0, 1_3, 6, 2_2, 3_9, 6, 2_0, 8, 4, 2_4, 1_3, 5, 6, 1_3, 7, 1_0, 9, 5, 1_4, 4, 1_8, 8, 1_4, 5, 1_5, 1_2, 4, 1_0, 9, 4, 8, 9, 5, 4, 1_1, 1_6, 9, 1_4, 1_3, 5, 1_4, 4, 2_4, 1_5, 1_6, 1_2, 4, 1_5, 7, 9, 2_1, 1_6, 7, 2_1, 5, 1_2, 4, 7, 9, 1_4, 4, 1_4, 5, 5, 2_4, 4, 1_0, 9, 6, 5, 1_3, 8, 2_4, 5, 1_3, 7, 2_5, 1_0, 1_5, 1_0, 6, 2_2, 4, 2_5, 5, 6, 2_0, 5, 5, 9, 4, 5_8, 7, 3_7, 2_3, 4, 4_9, 2_2, 3_2, 8, 1_3, 1_7, 1_1, 4, 7, 9, 1_4, 4, 3_2, 5, 9, 1_2, 8, 1_3, 5_5, 1_5, 8, 2_0, 2_6, 2], [4, 4_0, 4_7, 5_4, 3_2, 4, 1_0, 1_2, 4, 1_4, 5, 1_2, 1_0, 2_1, 9, 5, 1_4, 4, 6, 8, 4, 2_4, 1_3, 5, 3_9, 6, 1_3, 7, 1_0, 9, 4, 1_4, 5, 5, 2_4, 4, 2_5, 1_0, 1_4, 1_0, 1_3, 5, 1_7, 6, 1_0, 8, 9, 7, 1_5, 4, 1_3, 5, 2_4, 1_3, 5, 1_2, 5, 9, 6, 7, 6, 1_0, 8, 9, 1_2, 4, 1_9, 1_3, 8, 1_8, 4, 1_6, 9, 1_5, 7, 2_5, 5, 1_5, 5, 1_4, 4, 6, 5, 3_7, 6, 4, 2_5, 2_2, 4, 4_6, 8, 1_0, 9, 6, 1_5, 2_2, 4, 1_7, 8, 9, 1_4, 1_0, 6, 1_0, 8, 9, 1_0, 9, 2_1, 4, 8, 9, 4, 2_5, 8, 6, 1_1, 4, 1_5, 5, 1_9, 6, 4, 7, 9, 1_4, 4, 1_3, 1_0, 2_1, 1_1, 6, 4, 1_7, 8, 9, 6, 5, 3_7, 6, 4, 1_0, 9, 4, 7, 1_5, 1_5, 4, 1_5, 7, 2_2, 5, 1_3, 1_2, 2_6, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [4, 3_2, 1_1, 5, 4, 4_5, 1_6, 1_0, 1_7, 2_8, 4, 2_5, 1_3, 8, 2_0, 9, 4, 1_9, 8, 3_7, 4, 4_6, 1_6, 1_8, 2_4, 1_2, 4, 8, 2_7, 5, 1_3, 4, 6, 1_1, 5, 4, 1_5, 7, 5_7, 2_2, 4, 1_4, 8, 2_1, 2_6, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], ], '''attention_mask''': [ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], ] } # fmt: on self.tokenizer_integration_test_util( expected_encoding=SCREAMING_SNAKE_CASE , model_name='''microsoft/speecht5_asr''' , revision='''c5ef64c71905caeccde0e4462ef3f9077224c524''' , sequences=SCREAMING_SNAKE_CASE , )
148
1
from __future__ import annotations import csv import requests from bsa import BeautifulSoup def __lowerCAmelCase ( __magic_name__ = "" ): _lowercase: List[Any] = url or "https://www.imdb.com/chart/top/?ref_=nv_mv_250" _lowercase: List[Any] = BeautifulSoup(requests.get(__magic_name__ ).text , "html.parser" ) _lowercase: int = soup.find_all("td" , attrs="titleColumn" ) _lowercase: Union[str, Any] = soup.find_all("td" , class_="ratingColumn imdbRating" ) return { title.a.text: float(rating.strong.text ) for title, rating in zip(__magic_name__ , __magic_name__ ) } def __lowerCAmelCase ( __magic_name__ = "IMDb_Top_250_Movies.csv" ): _lowercase: Dict = get_imdb_top_aaa_movies() with open(__magic_name__ , "w" , newline="" ) as out_file: _lowercase: List[Any] = csv.writer(__magic_name__ ) writer.writerow(["Movie title", "IMDb rating"] ) for title, rating in movies.items(): writer.writerow([title, rating] ) if __name__ == "__main__": write_movies()
226
def __lowerCAmelCase ( __magic_name__ ): if not isinstance(__magic_name__ , __magic_name__ ): raise TypeError("only integers accepted as input" ) else: _lowercase: Optional[Any] = str(abs(__magic_name__ ) ) _lowercase: Tuple = [list(__magic_name__ ) for char in range(len(__magic_name__ ) )] for index in range(len(__magic_name__ ) ): num_transpositions[index].pop(__magic_name__ ) return max( int("".join(list(__magic_name__ ) ) ) for transposition in num_transpositions ) if __name__ == "__main__": __import__('doctest').testmod()
226
1
'''simple docstring''' import warnings from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class _snake_case ( __lowerCAmelCase ): SCREAMING_SNAKE_CASE : Optional[Any] = ["image_processor", "tokenizer"] SCREAMING_SNAKE_CASE : Optional[Any] = "LayoutLMv2ImageProcessor" SCREAMING_SNAKE_CASE : List[str] = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast") def __init__( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE ): '''simple docstring''' if "feature_extractor" in kwargs: warnings.warn( 'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`' ' instead.' , lowerCamelCase__ , ) lowerCAmelCase = kwargs.pop('feature_extractor' ) lowerCAmelCase = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('You need to specify an `image_processor`.' ) if tokenizer is None: raise ValueError('You need to specify a `tokenizer`.' ) super().__init__(lowerCamelCase__ , lowerCamelCase__ ) def __call__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 0 , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE , ): '''simple docstring''' if self.image_processor.apply_ocr and (boxes is not None): raise ValueError( 'You cannot provide bounding boxes ' 'if you initialized the image processor with apply_ocr set to True.' ) if self.image_processor.apply_ocr and (word_labels is not None): raise ValueError( 'You cannot provide word labels if you initialized the image processor with apply_ocr set to True.' ) if return_overflowing_tokens is True and return_offsets_mapping is False: raise ValueError('You cannot return overflowing tokens without returning the offsets mapping.' ) # first, apply the image processor lowerCAmelCase = self.image_processor(images=lowerCamelCase__ , return_tensors=lowerCamelCase__ ) # second, apply the tokenizer if text is not None and self.image_processor.apply_ocr and text_pair is None: if isinstance(lowerCamelCase__ , lowerCamelCase__ ): lowerCAmelCase = [text] # add batch dimension (as the image processor always adds a batch dimension) lowerCAmelCase = features['''words'''] lowerCAmelCase = self.tokenizer( text=text if text is not None else features['words'] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['boxes'] , word_labels=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , padding=lowerCamelCase__ , truncation=lowerCamelCase__ , max_length=lowerCamelCase__ , stride=lowerCamelCase__ , pad_to_multiple_of=lowerCamelCase__ , return_token_type_ids=lowerCamelCase__ , return_attention_mask=lowerCamelCase__ , return_overflowing_tokens=lowerCamelCase__ , return_special_tokens_mask=lowerCamelCase__ , return_offsets_mapping=lowerCamelCase__ , return_length=lowerCamelCase__ , verbose=lowerCamelCase__ , return_tensors=lowerCamelCase__ , **lowerCamelCase__ , ) # add pixel values lowerCAmelCase = features.pop('pixel_values' ) if return_overflowing_tokens is True: lowerCAmelCase = self.get_overflowing_images(lowerCamelCase__ , encoded_inputs['overflow_to_sample_mapping'] ) lowerCAmelCase = images return encoded_inputs def _SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): '''simple docstring''' lowerCAmelCase = [] for sample_idx in overflow_to_sample_mapping: images_with_overflow.append(images[sample_idx] ) if len(lowerCamelCase__ ) != len(lowerCamelCase__ ): raise ValueError( 'Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got' F' {len(lowerCamelCase__ )} and {len(lowerCamelCase__ )}' ) return images_with_overflow def _SCREAMING_SNAKE_CASE ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ): '''simple docstring''' return self.tokenizer.batch_decode(*lowerCamelCase__ , **lowerCamelCase__ ) def _SCREAMING_SNAKE_CASE ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ): '''simple docstring''' return self.tokenizer.decode(*lowerCamelCase__ , **lowerCamelCase__ ) @property def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' return ["input_ids", "bbox", "attention_mask", "image"] @property def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' warnings.warn( '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , lowerCamelCase__ , ) return self.image_processor_class @property def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' warnings.warn( '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , lowerCamelCase__ , ) return self.image_processor
718
'''simple docstring''' import json import os import unittest from transformers.models.roc_bert.tokenization_roc_bert import ( VOCAB_FILES_NAMES, RoCBertBasicTokenizer, RoCBertTokenizer, RoCBertWordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english @require_tokenizers class _snake_case ( a_ , unittest.TestCase ): SCREAMING_SNAKE_CASE : List[str] = RoCBertTokenizer SCREAMING_SNAKE_CASE : List[Any] = None SCREAMING_SNAKE_CASE : Union[str, Any] = False SCREAMING_SNAKE_CASE : str = True SCREAMING_SNAKE_CASE : List[Any] = filter_non_english def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' super().setUp() lowerCAmelCase = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', '你', '好', '是', '谁', 'a', 'b', 'c', 'd'] lowerCAmelCase = {} lowerCAmelCase = {} for i, value in enumerate(_SCREAMING_SNAKE_CASE ): lowerCAmelCase = i lowerCAmelCase = i lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['word_shape_file'] ) lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['word_pronunciation_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) ) with open(self.word_shape_file , 'w' , encoding='utf-8' ) as word_shape_writer: json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ensure_ascii=_SCREAMING_SNAKE_CASE ) with open(self.word_pronunciation_file , 'w' , encoding='utf-8' ) as word_pronunciation_writer: json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ensure_ascii=_SCREAMING_SNAKE_CASE ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' lowerCAmelCase = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file ) lowerCAmelCase = tokenizer.tokenize('你好[SEP]你是谁' ) self.assertListEqual(_SCREAMING_SNAKE_CASE , ['你', '好', '[SEP]', '你', '是', '谁'] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE ) , [5, 6, 2, 5, 7, 8] ) self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(_SCREAMING_SNAKE_CASE ) , [5, 6, 2, 5, 7, 8] ) self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(_SCREAMING_SNAKE_CASE ) , [5, 6, 2, 5, 7, 8] ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' lowerCAmelCase = RoCBertBasicTokenizer() self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' lowerCAmelCase = RoCBertBasicTokenizer(do_lower_case=_SCREAMING_SNAKE_CASE ) self.assertListEqual( tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' lowerCAmelCase = RoCBertBasicTokenizer(do_lower_case=_SCREAMING_SNAKE_CASE , strip_accents=_SCREAMING_SNAKE_CASE ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' lowerCAmelCase = RoCBertBasicTokenizer(do_lower_case=_SCREAMING_SNAKE_CASE , strip_accents=_SCREAMING_SNAKE_CASE ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' lowerCAmelCase = RoCBertBasicTokenizer(do_lower_case=_SCREAMING_SNAKE_CASE ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' lowerCAmelCase = RoCBertBasicTokenizer(do_lower_case=_SCREAMING_SNAKE_CASE ) self.assertListEqual( tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' lowerCAmelCase = RoCBertBasicTokenizer(do_lower_case=_SCREAMING_SNAKE_CASE , strip_accents=_SCREAMING_SNAKE_CASE ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' lowerCAmelCase = RoCBertBasicTokenizer(do_lower_case=_SCREAMING_SNAKE_CASE , strip_accents=_SCREAMING_SNAKE_CASE ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' lowerCAmelCase = RoCBertBasicTokenizer(do_lower_case=_SCREAMING_SNAKE_CASE , never_split=['[UNK]'] ) self.assertListEqual( tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' lowerCAmelCase = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing'] lowerCAmelCase = {} for i, token in enumerate(_SCREAMING_SNAKE_CASE ): lowerCAmelCase = i lowerCAmelCase = RoCBertWordpieceTokenizer(vocab=_SCREAMING_SNAKE_CASE , unk_token='[UNK]' ) self.assertListEqual(tokenizer.tokenize('' ) , [] ) self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] ) self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' self.assertTrue(_is_whitespace(' ' ) ) self.assertTrue(_is_whitespace('\t' ) ) self.assertTrue(_is_whitespace('\r' ) ) self.assertTrue(_is_whitespace('\n' ) ) self.assertTrue(_is_whitespace('\u00A0' ) ) self.assertFalse(_is_whitespace('A' ) ) self.assertFalse(_is_whitespace('-' ) ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' self.assertTrue(_is_control('\u0005' ) ) self.assertFalse(_is_control('A' ) ) self.assertFalse(_is_control(' ' ) ) self.assertFalse(_is_control('\t' ) ) self.assertFalse(_is_control('\r' ) ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' self.assertTrue(_is_punctuation('-' ) ) self.assertTrue(_is_punctuation('$' ) ) self.assertTrue(_is_punctuation('`' ) ) self.assertTrue(_is_punctuation('.' ) ) self.assertFalse(_is_punctuation('A' ) ) self.assertFalse(_is_punctuation(' ' ) ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' lowerCAmelCase = self.get_tokenizer() # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340 self.assertListEqual([tokenizer.tokenize(_SCREAMING_SNAKE_CASE ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] ) if self.test_rust_tokenizer: lowerCAmelCase = self.get_rust_tokenizer() self.assertListEqual( [rust_tokenizer.tokenize(_SCREAMING_SNAKE_CASE ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ): lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) lowerCAmelCase = F'A, naïve {tokenizer_r.mask_token} AllenNLP sentence.' lowerCAmelCase = tokenizer_r.encode_plus( _SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE , return_token_type_ids=_SCREAMING_SNAKE_CASE , return_offsets_mapping=_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , ) lowerCAmelCase = tokenizer_r.do_lower_case if hasattr(_SCREAMING_SNAKE_CASE , 'do_lower_case' ) else False lowerCAmelCase = ( [ ((0, 0), tokenizer_r.cls_token), ((0, 1), 'A'), ((1, 2), ','), ((3, 5), 'na'), ((5, 6), '##ï'), ((6, 8), '##ve'), ((9, 15), tokenizer_r.mask_token), ((16, 21), 'Allen'), ((21, 23), '##NL'), ((23, 24), '##P'), ((25, 33), 'sentence'), ((33, 34), '.'), ((0, 0), tokenizer_r.sep_token), ] if not do_lower_case else [ ((0, 0), tokenizer_r.cls_token), ((0, 1), 'a'), ((1, 2), ','), ((3, 8), 'naive'), ((9, 15), tokenizer_r.mask_token), ((16, 21), 'allen'), ((21, 23), '##nl'), ((23, 24), '##p'), ((25, 33), 'sentence'), ((33, 34), '.'), ((0, 0), tokenizer_r.sep_token), ] ) self.assertEqual( [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['input_ids'] ) ) self.assertEqual([e[0] for e in expected_results] , tokens['offset_mapping'] ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' lowerCAmelCase = ['的', '人', '有'] lowerCAmelCase = ''.join(_SCREAMING_SNAKE_CASE ) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ): lowerCAmelCase = True lowerCAmelCase = self.tokenizer_class.from_pretrained(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) lowerCAmelCase = tokenizer_p.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE ) lowerCAmelCase = tokenizer_r.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE ) lowerCAmelCase = tokenizer_r.convert_ids_to_tokens(_SCREAMING_SNAKE_CASE ) lowerCAmelCase = tokenizer_p.convert_ids_to_tokens(_SCREAMING_SNAKE_CASE ) # it is expected that each Chinese character is not preceded by "##" self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) lowerCAmelCase = False lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) lowerCAmelCase = self.tokenizer_class.from_pretrained(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) lowerCAmelCase = tokenizer_r.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE ) lowerCAmelCase = tokenizer_p.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE ) lowerCAmelCase = tokenizer_r.convert_ids_to_tokens(_SCREAMING_SNAKE_CASE ) lowerCAmelCase = tokenizer_p.convert_ids_to_tokens(_SCREAMING_SNAKE_CASE ) # it is expected that only the first Chinese character is not preceded by "##". lowerCAmelCase = [ F'##{token}' if idx != 0 else token for idx, token in enumerate(_SCREAMING_SNAKE_CASE ) ] self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) @slow def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' lowerCAmelCase = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file ) lowerCAmelCase = tokenizer.encode('你好' , add_special_tokens=_SCREAMING_SNAKE_CASE ) lowerCAmelCase = tokenizer.encode('你是谁' , add_special_tokens=_SCREAMING_SNAKE_CASE ) lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(_SCREAMING_SNAKE_CASE ) lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) assert encoded_sentence == [1] + text + [2] assert encoded_pair == [1] + text + [2] + text_a + [2] def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' lowerCAmelCase = self.get_tokenizers(do_lower_case=_SCREAMING_SNAKE_CASE ) for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): lowerCAmelCase = '你好,你是谁' lowerCAmelCase = tokenizer.tokenize(_SCREAMING_SNAKE_CASE ) lowerCAmelCase = tokenizer.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE ) lowerCAmelCase = tokenizer.convert_tokens_to_shape_ids(_SCREAMING_SNAKE_CASE ) lowerCAmelCase = tokenizer.convert_tokens_to_pronunciation_ids(_SCREAMING_SNAKE_CASE ) lowerCAmelCase = tokenizer.prepare_for_model( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE ) lowerCAmelCase = tokenizer.encode_plus(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE ) self.assertEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
514
0
import pickle import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin SCREAMING_SNAKE_CASE : str = get_tests_dir("fixtures/test_sentencepiece.model") @require_sentencepiece @require_tokenizers class UpperCamelCase ( lowercase__ , unittest.TestCase ): '''simple docstring''' lowercase : Any =XGLMTokenizer lowercase : Tuple =XGLMTokenizerFast lowercase : List[Any] =True lowercase : Optional[int] =True def UpperCamelCase ( self ): super().setUp() # We have a SentencePiece fixture for testing lowercase_ :Dict = XGLMTokenizer(UpperCamelCase_ , keep_accents=UpperCamelCase_ ) tokenizer.save_pretrained(self.tmpdirname ) def UpperCamelCase ( self ): lowercase_ :List[Any] = '''<pad>''' lowercase_ :Optional[Any] = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase_ ) , UpperCamelCase_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase_ ) , UpperCamelCase_ ) def UpperCamelCase ( self ): lowercase_ :int = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<s>''' ) self.assertEqual(vocab_keys[1] , '''<pad>''' ) self.assertEqual(len(UpperCamelCase_ ) , 1008 ) def UpperCamelCase ( self ): self.assertEqual(self.get_tokenizer().vocab_size , 1008 ) def UpperCamelCase ( self ): lowercase_ :str = XGLMTokenizer(UpperCamelCase_ , keep_accents=UpperCamelCase_ ) lowercase_ :Optional[Any] = tokenizer.tokenize('''This is a test''' ) self.assertListEqual(UpperCamelCase_ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(UpperCamelCase_ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) lowercase_ :List[Any] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( UpperCamelCase_ , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.''', ] , ) lowercase_ :List[Any] = tokenizer.convert_tokens_to_ids(UpperCamelCase_ ) self.assertListEqual( UpperCamelCase_ , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] , ) lowercase_ :Optional[Any] = tokenizer.convert_ids_to_tokens(UpperCamelCase_ ) self.assertListEqual( UpperCamelCase_ , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.''', ] , ) @cached_property def UpperCamelCase ( self ): return XGLMTokenizer.from_pretrained('''facebook/xglm-564M''' ) def UpperCamelCase ( self ): with tempfile.NamedTemporaryFile() as f: shutil.copyfile(UpperCamelCase_ , f.name ) lowercase_ :str = XGLMTokenizer(f.name , keep_accents=UpperCamelCase_ ) lowercase_ :Any = pickle.dumps(UpperCamelCase_ ) pickle.loads(UpperCamelCase_ ) def UpperCamelCase ( self ): if not self.test_rust_tokenizer: return lowercase_ :int = self.get_tokenizer() lowercase_ :Tuple = self.get_rust_tokenizer() lowercase_ :Union[str, Any] = '''I was born in 92000, and this is falsé.''' lowercase_ :Optional[int] = tokenizer.tokenize(UpperCamelCase_ ) lowercase_ :Dict = rust_tokenizer.tokenize(UpperCamelCase_ ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) lowercase_ :List[str] = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) lowercase_ :Optional[int] = rust_tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) lowercase_ :List[Any] = self.get_rust_tokenizer() lowercase_ :Optional[int] = tokenizer.encode(UpperCamelCase_ ) lowercase_ :List[str] = rust_tokenizer.encode(UpperCamelCase_ ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) @slow def UpperCamelCase ( self ): lowercase_ :str = '''Hello World!''' lowercase_ :Any = [2, 3_1227, 4447, 35] self.assertListEqual(UpperCamelCase_ , self.big_tokenizer.encode(UpperCamelCase_ ) ) @slow def UpperCamelCase ( self ): lowercase_ :Dict = ( '''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will''' ''' add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth''' ) # fmt: off lowercase_ :List[Any] = [2, 1018, 67, 11, 1988, 2617, 5631, 278, 11, 3407, 48, 7_1630, 2_8085, 4, 3234, 157, 13, 6, 5, 6, 4, 3526, 768, 15, 659, 57, 298, 3983, 864, 129, 21, 6, 5, 1_3675, 377, 652, 7580, 1_0341, 155, 2817, 422, 1666, 7, 1674, 53, 113, 20_2277, 1_7892, 33, 60, 87, 4, 3234, 157, 61, 2667, 5_2376, 19, 88, 23, 735] # fmt: on self.assertListEqual(UpperCamelCase_ , self.big_tokenizer.encode(UpperCamelCase_ ) ) @slow def UpperCamelCase ( self ): # fmt: off lowercase_ :Optional[Any] = { '''input_ids''': [[2, 10_8825, 1163, 15, 8_8010, 473, 1_5898, 157, 1_3672, 1857, 312, 8, 23_8021, 1163, 53, 1_3672, 1857, 312, 8, 5_3283, 18_2396, 8, 1_8566, 16, 3_6733, 4101, 8, 230, 24_4017, 12_2553, 7, 15, 13_2597, 4, 293, 1_2511, 7610, 4, 3414, 13_2597, 9, 4, 3_2361, 362, 4, 734, 2_8512, 3_2569, 18, 4, 3_2361, 2_6096, 1_4982, 73, 1_8715, 2_1433, 23_5261, 15, 492, 1_2427, 16, 53, 1_8715, 2_1433, 6_5454, 15, 2_3659, 563, 16, 278, 597, 2843, 595, 7931, 18_2396, 6_4186, 22, 886, 595, 13_2981, 53, 2_5540, 3449, 4_3982, 3_9901, 5951, 878, 330, 4, 2_7694, 8_0269, 312, 53, 6517, 1_1780, 611, 2_0408, 5], [2, 6, 13_2597, 67, 4_2897, 33, 592, 8, 16_3729, 2_5540, 361, 13_6997, 10_9514, 17_3230, 7, 501, 60, 10_2913, 196, 5631, 235, 6_3243, 473, 6, 23_1757, 74, 5277, 7905, 53, 3095, 3_7317, 22, 454, 18_3874, 5], [2, 268, 3_1298, 4_6530, 6, 13_2935, 4_3831, 7, 597, 32, 24, 3688, 9865, 5]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] } # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=UpperCamelCase_ , model_name='''facebook/xglm-564M''' , padding=UpperCamelCase_ , )
257
import math def UpperCamelCase ( _a ) -> bool: '''simple docstring''' lowercase_ :int = math.loga(math.sqrt(4 * positive_integer + 1 ) / 2 + 1 / 2 ) return exponent == int(_a ) def UpperCamelCase ( _a = 1 / 1_2_3_4_5 ) -> int: '''simple docstring''' lowercase_ :Union[str, Any] = 0 lowercase_ :List[str] = 0 lowercase_ :int = 3 while True: lowercase_ :Optional[Any] = (integer**2 - 1) / 4 # if candidate is an integer, then there is a partition for k if partition_candidate == int(_a ): lowercase_ :List[Any] = int(_a ) total_partitions += 1 if check_partition_perfect(_a ): perfect_partitions += 1 if perfect_partitions > 0: if perfect_partitions / total_partitions < max_proportion: return int(_a ) integer += 1 if __name__ == "__main__": print(f"{solution() = }")
257
1
import copy from typing import Dict, Optional from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING from ..detr import DetrConfig from ..swin import SwinConfig lowercase_ = { 'facebook/maskformer-swin-base-ade': ( 'https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json' ) # See all MaskFormer models at https://huggingface.co/models?filter=maskformer } lowercase_ = logging.get_logger(__name__) class A_ ( __UpperCamelCase ): '''simple docstring''' __snake_case = """maskformer""" __snake_case = {"""hidden_size""": """mask_feature_size"""} __snake_case = ["""resnet""", """swin"""] __snake_case = ["""detr"""] def __init__( self: Union[str, Any] , a: int = 256 , a: int = 256 , a: float = 0.1 , a: bool = False , a: Optional[Dict] = None , a: Optional[Dict] = None , a: float = 0.0_2 , a: float = 1.0 , a: float = 1.0 , a: float = 1.0 , a: float = 20.0 , a: Optional[bool] = None , **a: Optional[Any] , ): if backbone_config is None: # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k __lowerCamelCase : Optional[Any] = SwinConfig( image_size=384 , in_channels=3 , patch_size=4 , embed_dim=128 , depths=[2, 2, 18, 2] , num_heads=[4, 8, 16, 32] , window_size=12 , drop_path_rate=0.3 , out_features=['stage1', 'stage2', 'stage3', 'stage4'] , ) if isinstance(a , a ): __lowerCamelCase : Any = backbone_config.pop('model_type' ) __lowerCamelCase : Optional[Any] = CONFIG_MAPPING[backbone_model_type] __lowerCamelCase : int = config_class.from_dict(a ) # verify that the backbone is supported if backbone_config.model_type not in self.backbones_supported: logger.warning_once( F'Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. ' F'Supported model types: {",".join(self.backbones_supported )}' ) if decoder_config is None: # fall back to https://huggingface.co/facebook/detr-resnet-50 __lowerCamelCase : Any = DetrConfig() else: # verify that the decoder is supported __lowerCamelCase : Union[str, Any] = ( decoder_config.pop('model_type' ) if isinstance(a , a ) else decoder_config.model_type ) if decoder_type not in self.decoders_supported: raise ValueError( F'Transformer Decoder {decoder_type} not supported, please use one of' F' {",".join(self.decoders_supported )}' ) if isinstance(a , a ): __lowerCamelCase : str = CONFIG_MAPPING[decoder_type] __lowerCamelCase : Union[str, Any] = config_class.from_dict(a ) __lowerCamelCase : int = backbone_config __lowerCamelCase : List[str] = decoder_config # main feature dimension for the model __lowerCamelCase : Dict = fpn_feature_size __lowerCamelCase : Tuple = mask_feature_size # initializer __lowerCamelCase : Optional[Any] = init_std __lowerCamelCase : Optional[int] = init_xavier_std # Hungarian matcher && loss __lowerCamelCase : Optional[Any] = cross_entropy_weight __lowerCamelCase : Any = dice_weight __lowerCamelCase : List[str] = mask_weight __lowerCamelCase : Optional[int] = use_auxiliary_loss __lowerCamelCase : Union[str, Any] = no_object_weight __lowerCamelCase : Tuple = output_auxiliary_logits __lowerCamelCase : Dict = self.decoder_config.encoder_attention_heads __lowerCamelCase : int = self.decoder_config.num_hidden_layers super().__init__(**a ) @classmethod def _snake_case ( cls: str , a: PretrainedConfig , a: PretrainedConfig , **a: Tuple ): return cls( backbone_config=a , decoder_config=a , **a , ) def _snake_case ( self: Tuple ): __lowerCamelCase : int = copy.deepcopy(self.__dict__ ) __lowerCamelCase : str = self.backbone_config.to_dict() __lowerCamelCase : List[str] = self.decoder_config.to_dict() __lowerCamelCase : int = self.__class__.model_type return output
712
import logging import os import sys from dataclasses import dataclass, field from importlib import import_module from typing import Dict, List, Optional, Tuple import numpy as np from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score from torch import nn from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask import transformers from transformers import ( AutoConfig, AutoModelForTokenClassification, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process lowercase_ = logging.getLogger(__name__) @dataclass class A_ : '''simple docstring''' __snake_case = field( metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} ) __snake_case = field( default=__UpperCamelCase , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} ) __snake_case = field( default="""NER""" , metadata={"""help""": """Task type to fine tune in training (e.g. NER, POS, etc)"""} ) __snake_case = field( default=__UpperCamelCase , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} ) __snake_case = field(default=__UpperCamelCase , metadata={"""help""": """Set this flag to use fast tokenization."""} ) # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script, # or just modify its tokenizer_config.json. __snake_case = field( default=__UpperCamelCase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , ) @dataclass class A_ : '''simple docstring''' __snake_case = field( metadata={"""help""": """The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."""} ) __snake_case = field( default=__UpperCamelCase , metadata={"""help""": """Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."""} , ) __snake_case = field( default=128 , metadata={ """help""": ( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) } , ) __snake_case = field( default=__UpperCamelCase , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} ) def UpperCamelCase__ ( ): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. __lowerCamelCase : str = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Any = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : int = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f'Output directory ({training_args.output_dir}) already exists and is not empty. Use' ' --overwrite_output_dir to overcome.' ) __lowerCamelCase : List[Any] = import_module('tasks' ) try: __lowerCamelCase : Union[str, Any] = getattr(SCREAMING_SNAKE_CASE__ , model_args.task_type ) __lowerCamelCase : TokenClassificationTask = token_classification_task_clazz() except AttributeError: raise ValueError( f'Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. ' f'Available tasks classes are: {TokenClassificationTask.__subclasses__()}' ) # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( 'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info('Training/evaluation parameters %s' , SCREAMING_SNAKE_CASE__ ) # Set seed set_seed(training_args.seed ) # Prepare CONLL-2003 task __lowerCamelCase : Dict = token_classification_task.get_labels(data_args.labels ) __lowerCamelCase : Dict[int, str] = dict(enumerate(SCREAMING_SNAKE_CASE__ ) ) __lowerCamelCase : Any = len(SCREAMING_SNAKE_CASE__ ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. __lowerCamelCase : Tuple = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=SCREAMING_SNAKE_CASE__ , idalabel=SCREAMING_SNAKE_CASE__ , labelaid={label: i for i, label in enumerate(SCREAMING_SNAKE_CASE__ )} , cache_dir=model_args.cache_dir , ) __lowerCamelCase : List[str] = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , ) __lowerCamelCase : List[Any] = AutoModelForTokenClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=SCREAMING_SNAKE_CASE__ , cache_dir=model_args.cache_dir , ) # Get datasets __lowerCamelCase : Dict = ( TokenClassificationDataset( token_classification_task=SCREAMING_SNAKE_CASE__ , data_dir=data_args.data_dir , tokenizer=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , ) if training_args.do_train else None ) __lowerCamelCase : Any = ( TokenClassificationDataset( token_classification_task=SCREAMING_SNAKE_CASE__ , data_dir=data_args.data_dir , tokenizer=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , ) if training_args.do_eval else None ) def align_predictions(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Tuple[List[int], List[int]]: __lowerCamelCase : Any = np.argmax(SCREAMING_SNAKE_CASE__ , axis=2 ) __lowerCamelCase , __lowerCamelCase : List[str] = preds.shape __lowerCamelCase : List[str] = [[] for _ in range(SCREAMING_SNAKE_CASE__ )] __lowerCamelCase : List[str] = [[] for _ in range(SCREAMING_SNAKE_CASE__ )] for i in range(SCREAMING_SNAKE_CASE__ ): for j in range(SCREAMING_SNAKE_CASE__ ): if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index: out_label_list[i].append(label_map[label_ids[i][j]] ) preds_list[i].append(label_map[preds[i][j]] ) return preds_list, out_label_list def compute_metrics(SCREAMING_SNAKE_CASE__ ) -> Dict: __lowerCamelCase , __lowerCamelCase : List[str] = align_predictions(p.predictions , p.label_ids ) return { "accuracy_score": accuracy_score(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ), "precision": precision_score(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ), "recall": recall_score(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ), "f1": fa_score(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ), } # Data collator __lowerCamelCase : Any = DataCollatorWithPadding(SCREAMING_SNAKE_CASE__ , pad_to_multiple_of=8 ) if training_args.fpaa else None # Initialize our Trainer __lowerCamelCase : Union[str, Any] = Trainer( model=SCREAMING_SNAKE_CASE__ , args=SCREAMING_SNAKE_CASE__ , train_dataset=SCREAMING_SNAKE_CASE__ , eval_dataset=SCREAMING_SNAKE_CASE__ , compute_metrics=SCREAMING_SNAKE_CASE__ , data_collator=SCREAMING_SNAKE_CASE__ , ) # Training if training_args.do_train: trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_process_zero(): tokenizer.save_pretrained(training_args.output_dir ) # Evaluation __lowerCamelCase : List[str] = {} if training_args.do_eval: logger.info('*** Evaluate ***' ) __lowerCamelCase : int = trainer.evaluate() __lowerCamelCase : Optional[int] = os.path.join(training_args.output_dir , 'eval_results.txt' ) if trainer.is_world_process_zero(): with open(SCREAMING_SNAKE_CASE__ , 'w' ) as writer: logger.info('***** Eval results *****' ) for key, value in result.items(): logger.info(' %s = %s' , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) writer.write('%s = %s\n' % (key, value) ) results.update(SCREAMING_SNAKE_CASE__ ) # Predict if training_args.do_predict: __lowerCamelCase : Optional[Any] = TokenClassificationDataset( token_classification_task=SCREAMING_SNAKE_CASE__ , data_dir=data_args.data_dir , tokenizer=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , ) __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : List[Any] = trainer.predict(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase , __lowerCamelCase : Optional[Any] = align_predictions(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) __lowerCamelCase : Optional[int] = os.path.join(training_args.output_dir , 'test_results.txt' ) if trainer.is_world_process_zero(): with open(SCREAMING_SNAKE_CASE__ , 'w' ) as writer: for key, value in metrics.items(): logger.info(' %s = %s' , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) writer.write('%s = %s\n' % (key, value) ) # Save predictions __lowerCamelCase : Union[str, Any] = os.path.join(training_args.output_dir , 'test_predictions.txt' ) if trainer.is_world_process_zero(): with open(SCREAMING_SNAKE_CASE__ , 'w' ) as writer: with open(os.path.join(data_args.data_dir , 'test.txt' ) , 'r' ) as f: token_classification_task.write_predictions_to_file(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) return results def UpperCamelCase__ ( SCREAMING_SNAKE_CASE__ ): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
230
0