code
stringlengths
86
54.5k
code_codestyle
int64
0
371
style_context
stringlengths
87
49.2k
style_context_codestyle
int64
0
349
label
int64
0
1
"""simple docstring""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging lowerCamelCase_ : Any = logging.get_logger(__name__) lowerCamelCase_ : Tuple = {"""vocab_file""": """sentencepiece.bpe.model"""} lowerCamelCase_ : str = { """vocab_file""": { """moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model""", """moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model""", """moussaKam/barthez-orangesum-title""": ( """https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model""" ), }, } lowerCamelCase_ : Optional[int] = { """moussaKam/mbarthez""": 1_0_2_4, """moussaKam/barthez""": 1_0_2_4, """moussaKam/barthez-orangesum-title""": 1_0_2_4, } lowerCamelCase_ : Tuple = """▁""" class __A ( _SCREAMING_SNAKE_CASE ): """simple docstring""" __lowerCAmelCase = VOCAB_FILES_NAMES __lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP __lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowerCAmelCase = ["input_ids", "attention_mask"] def __init__( self , __A , __A="<s>" , __A="</s>" , __A="</s>" , __A="<s>" , __A="<unk>" , __A="<pad>" , __A="<mask>" , __A = None , **__A , ) -> None: # Mask token behave like a normal word, i.e. 
include the space before it a =AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else mask_token a ={} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=__A , eos_token=__A , unk_token=__A , sep_token=__A , cls_token=__A , pad_token=__A , mask_token=__A , sp_model_kwargs=self.sp_model_kwargs , **__A , ) a =vocab_file a =spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(__A ) ) a ={'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3} a =len(self.sp_model ) - 1 a ={v: k for k, v in self.fairseq_tokens_to_ids.items()} def SCREAMING_SNAKE_CASE ( self , __A , __A = None ) -> List[int]: if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] a =[self.cls_token_id] a =[self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def SCREAMING_SNAKE_CASE ( self , __A , __A = None , __A = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__A , token_ids_a=__A , already_has_special_tokens=__A ) if token_ids_a is None: return [1] + ([0] * len(__A )) + [1] return [1] + ([0] * len(__A )) + [1, 1] + ([0] * len(__A )) + [1] def SCREAMING_SNAKE_CASE ( self , __A , __A = None ) -> List[int]: a =[self.sep_token_id] a =[self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def SCREAMING_SNAKE_CASE ( self ) -> Any: return len(self.sp_model ) def SCREAMING_SNAKE_CASE ( self ) -> int: a ={self.convert_ids_to_tokens(__A ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def SCREAMING_SNAKE_CASE ( self , __A ) -> List[str]: return self.sp_model.encode(__A , out_type=__A ) def SCREAMING_SNAKE_CASE ( self , __A ) -> Dict: if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] a =self.sp_model.PieceToId(__A ) return spm_id if spm_id else 
self.unk_token_id def SCREAMING_SNAKE_CASE ( self , __A ) -> Optional[Any]: if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(__A ) def SCREAMING_SNAKE_CASE ( self , __A ) -> Tuple: a =[] a ='''''' a =False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(__A ) + token a =True a =[] else: current_sub_tokens.append(__A ) a =False out_string += self.sp_model.decode(__A ) return out_string.strip() def __getstate__( self ) -> Tuple: a =self.__dict__.copy() a =None return state def __setstate__( self , __A ) -> Tuple: a =d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): a ={} a =spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def SCREAMING_SNAKE_CASE ( self , __A , __A = None ) -> Tuple[str]: if not os.path.isdir(__A ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return a =os.path.join( __A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__A ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __A ) elif not os.path.isfile(self.vocab_file ): with open(__A , '''wb''' ) as fi: a =self.sp_model.serialized_model_proto() fi.write(__A ) return (out_vocab_file,)
81
"""simple docstring""" def _A ( ): """simple docstring""" for n in range(1 , 1_00_00_00 ): yield n * (n + 1) // 2 def _A ( lowercase ): """simple docstring""" a =1 a =2 while i * i <= n: a =0 while n % i == 0: n //= i multiplicity += 1 divisors_count *= multiplicity + 1 i += 1 if n > 1: divisors_count *= 2 return divisors_count def _A ( ): """simple docstring""" return next(i for i in triangle_number_generator() if count_divisors(lowercase ) > 5_00 ) if __name__ == "__main__": print(solution())
81
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) _lowerCamelCase : Dict = {"""configuration_xlnet""": ["""XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLNetConfig"""]} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase : Union[str, Any] = ["""XLNetTokenizer"""] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase : Any = ["""XLNetTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase : Any = [ """XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""", """XLNetForMultipleChoice""", """XLNetForQuestionAnswering""", """XLNetForQuestionAnsweringSimple""", """XLNetForSequenceClassification""", """XLNetForTokenClassification""", """XLNetLMHeadModel""", """XLNetModel""", """XLNetPreTrainedModel""", """load_tf_weights_in_xlnet""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase : Optional[Any] = [ """TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFXLNetForMultipleChoice""", """TFXLNetForQuestionAnsweringSimple""", """TFXLNetForSequenceClassification""", """TFXLNetForTokenClassification""", """TFXLNetLMHeadModel""", """TFXLNetMainLayer""", """TFXLNetModel""", """TFXLNetPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xlnet import XLNetTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: 
pass else: from .tokenization_xlnet_fast import XLNetTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xlnet import ( XLNET_PRETRAINED_MODEL_ARCHIVE_LIST, XLNetForMultipleChoice, XLNetForQuestionAnswering, XLNetForQuestionAnsweringSimple, XLNetForSequenceClassification, XLNetForTokenClassification, XLNetLMHeadModel, XLNetModel, XLNetPreTrainedModel, load_tf_weights_in_xlnet, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xlnet import ( TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFXLNetForMultipleChoice, TFXLNetForQuestionAnsweringSimple, TFXLNetForSequenceClassification, TFXLNetForTokenClassification, TFXLNetLMHeadModel, TFXLNetMainLayer, TFXLNetModel, TFXLNetPreTrainedModel, ) else: import sys _lowerCamelCase : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
354
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) _lowerCamelCase : Union[str, Any] = { """configuration_swiftformer""": [ """SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """SwiftFormerConfig""", """SwiftFormerOnnxConfig""", ] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase : Union[str, Any] = [ """SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""", """SwiftFormerForImageClassification""", """SwiftFormerModel""", """SwiftFormerPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_swiftformer import ( SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, SwiftFormerConfig, SwiftFormerOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_swiftformer import ( SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, SwiftFormerForImageClassification, SwiftFormerModel, SwiftFormerPreTrainedModel, ) else: import sys _lowerCamelCase : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
231
0
'''simple docstring''' def __lowerCamelCase ( __snake_case : str ) -> list: """simple docstring""" if n_term == "": return [] A__ : list =[] for temp in range(int(__snake_case ) ): series.append(f"1/{temp + 1}" if series else """1""" ) return series if __name__ == "__main__": __snake_case : List[str] = input('Enter the last number (nth term) of the Harmonic Series') print('Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n') print(harmonic_series(nth_term))
134
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import normalizers from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_roformer import RoFormerTokenizer from .tokenization_utils import JiebaPreTokenizer __snake_case : Optional[int] = logging.get_logger(__name__) __snake_case : Optional[int] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'} __snake_case : Optional[Any] = { 'vocab_file': { 'junnyu/roformer_chinese_small': 'https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt', 'junnyu/roformer_chinese_base': 'https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt', 'junnyu/roformer_chinese_char_small': ( 'https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt' ), 'junnyu/roformer_chinese_char_base': ( 'https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt' ), 'junnyu/roformer_small_discriminator': ( 'https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt' ), 'junnyu/roformer_small_generator': ( 'https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt' ), } } __snake_case : Tuple = { 'junnyu/roformer_chinese_small': 1536, 'junnyu/roformer_chinese_base': 1536, 'junnyu/roformer_chinese_char_small': 512, 'junnyu/roformer_chinese_char_base': 512, 'junnyu/roformer_small_discriminator': 128, 'junnyu/roformer_small_generator': 128, } __snake_case : Optional[Any] = { 'junnyu/roformer_chinese_small': {'do_lower_case': True}, 'junnyu/roformer_chinese_base': {'do_lower_case': True}, 'junnyu/roformer_chinese_char_small': {'do_lower_case': True}, 'junnyu/roformer_chinese_char_base': {'do_lower_case': True}, 'junnyu/roformer_small_discriminator': {'do_lower_case': True}, 'junnyu/roformer_small_generator': {'do_lower_case': True}, } class lowerCamelCase ( 
lowercase_ ): '''simple docstring''' __snake_case = VOCAB_FILES_NAMES __snake_case = PRETRAINED_VOCAB_FILES_MAP __snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __snake_case = PRETRAINED_INIT_CONFIGURATION __snake_case = RoFormerTokenizer def __init__( self : str , lowerCAmelCase_ : Dict=None , lowerCAmelCase_ : Union[str, Any]=None , lowerCAmelCase_ : List[Any]=True , lowerCAmelCase_ : Any="[UNK]" , lowerCAmelCase_ : List[Any]="[SEP]" , lowerCAmelCase_ : Union[str, Any]="[PAD]" , lowerCAmelCase_ : Optional[Any]="[CLS]" , lowerCAmelCase_ : Dict="[MASK]" , lowerCAmelCase_ : Optional[int]=True , lowerCAmelCase_ : Tuple=None , **lowerCAmelCase_ : Tuple , ) -> List[str]: '''simple docstring''' super().__init__( lowerCAmelCase_ , tokenizer_file=lowerCAmelCase_ , do_lower_case=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , cls_token=lowerCAmelCase_ , mask_token=lowerCAmelCase_ , tokenize_chinese_chars=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ , **lowerCAmelCase_ , ) A__ : Union[str, Any] =json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( pre_tok_state.get("""lowercase""" , lowerCAmelCase_ ) != do_lower_case or pre_tok_state.get("""strip_accents""" , lowerCAmelCase_ ) != strip_accents ): A__ : int =getattr(lowerCAmelCase_ , pre_tok_state.pop("""type""" ) ) A__ : Union[str, Any] =do_lower_case A__ : Tuple =strip_accents A__ : int =pre_tok_class(**lowerCAmelCase_ ) A__ : List[Any] =do_lower_case def __getstate__( self : Optional[int] ) -> str: '''simple docstring''' A__ : Any =self.__dict__.copy() A__ : List[str] =BertPreTokenizer() return state def __setstate__( self : int , lowerCAmelCase_ : str ) -> str: '''simple docstring''' A__ : str =d A__ : Optional[Any] =self.__dict__["""_tokenizer"""].get_vocab() A__ : Any =PreTokenizer.custom(JiebaPreTokenizer(lowerCAmelCase_ ) ) def lowercase__ ( self : int , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[str]=None ) -> Optional[Any]: '''simple 
docstring''' A__ : List[str] =[self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None ) -> List[int]: '''simple docstring''' A__ : int =[self.sep_token_id] A__ : List[str] =[self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def lowercase__ ( self : List[str] , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[str] = None ) -> Tuple[str]: '''simple docstring''' A__ : List[Any] =self._tokenizer.model.save(lowerCAmelCase_ , name=lowerCAmelCase_ ) return tuple(lowerCAmelCase_ ) def lowercase__ ( self : Any , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : str=None , lowerCAmelCase_ : Any=None , lowerCAmelCase_ : Tuple=False , **lowerCAmelCase_ : Tuple , ) -> List[Any]: '''simple docstring''' A__ : List[Any] =BertPreTokenizer() return super().save_pretrained(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_ )
134
1
"""simple docstring""" import numpy as np import torch import tqdm from ...models.unet_ad import UNetaDModel from ...pipelines import DiffusionPipeline from ...utils import randn_tensor from ...utils.dummy_pt_objects import DDPMScheduler class __A ( A_ ): '''simple docstring''' def __init__( self : Any ,_snake_case : UNetaDModel ,_snake_case : UNetaDModel ,_snake_case : DDPMScheduler ,_snake_case : Any ,) -> List[Any]: """simple docstring""" super().__init__() lowercase__ : Optional[int] = value_function lowercase__ : Optional[int] = unet lowercase__ : Tuple = scheduler lowercase__ : Dict = env lowercase__ : int = env.get_dataset() lowercase__ : Dict = {} for key in self.data.keys(): try: lowercase__ : Optional[Any] = self.data[key].mean() except: # noqa: E722 pass lowercase__ : List[Any] = {} for key in self.data.keys(): try: lowercase__ : str = self.data[key].std() except: # noqa: E722 pass lowercase__ : Tuple = env.observation_space.shape[0] lowercase__ : Optional[int] = env.action_space.shape[0] def UpperCAmelCase ( self : str ,_snake_case : Any ,_snake_case : int ) -> Optional[Any]: """simple docstring""" return (x_in - self.means[key]) / self.stds[key] def UpperCAmelCase ( self : Dict ,_snake_case : int ,_snake_case : List[Any] ) -> Tuple: """simple docstring""" return x_in * self.stds[key] + self.means[key] def UpperCAmelCase ( self : Union[str, Any] ,_snake_case : Dict ) -> Optional[int]: """simple docstring""" if type(_snake_case ) is dict: return {k: self.to_torch(_snake_case ) for k, v in x_in.items()} elif torch.is_tensor(_snake_case ): return x_in.to(self.unet.device ) return torch.tensor(_snake_case ,device=self.unet.device ) def UpperCAmelCase ( self : Union[str, Any] ,_snake_case : Any ,_snake_case : int ,_snake_case : List[Any] ) -> Tuple: """simple docstring""" for key, val in cond.items(): lowercase__ : List[Any] = val.clone() return x_in def UpperCAmelCase ( self : int ,_snake_case : Optional[int] ,_snake_case : List[Any] ,_snake_case : int 
,_snake_case : int ) -> Optional[Any]: """simple docstring""" lowercase__ : Any = x.shape[0] lowercase__ : Dict = None for i in tqdm.tqdm(self.scheduler.timesteps ): # create batch of timesteps to pass into model lowercase__ : Dict = torch.full((batch_size,) ,_snake_case ,device=self.unet.device ,dtype=torch.long ) for _ in range(_snake_case ): with torch.enable_grad(): x.requires_grad_() # permute to match dimension for pre-trained models lowercase__ : int = self.value_function(x.permute(0 ,2 ,1 ) ,_snake_case ).sample lowercase__ : Optional[Any] = torch.autograd.grad([y.sum()] ,[x] )[0] lowercase__ : List[str] = self.scheduler._get_variance(_snake_case ) lowercase__ : Union[str, Any] = torch.exp(0.5 * posterior_variance ) lowercase__ : Optional[int] = model_std * grad lowercase__ : Optional[Any] = 0 lowercase__ : str = x.detach() lowercase__ : Dict = x + scale * grad lowercase__ : str = self.reset_xa(_snake_case ,_snake_case ,self.action_dim ) lowercase__ : Union[str, Any] = self.unet(x.permute(0 ,2 ,1 ) ,_snake_case ).sample.permute(0 ,2 ,1 ) # TODO: verify deprecation of this kwarg lowercase__ : Dict = self.scheduler.step(_snake_case ,_snake_case ,_snake_case ,predict_epsilon=_snake_case )['''prev_sample'''] # apply conditions to the trajectory (set the initial state) lowercase__ : Dict = self.reset_xa(_snake_case ,_snake_case ,self.action_dim ) lowercase__ : Union[str, Any] = self.to_torch(_snake_case ) return x, y def __call__( self : Union[str, Any] ,_snake_case : Any ,_snake_case : Tuple=64 ,_snake_case : Any=32 ,_snake_case : Optional[Any]=2 ,_snake_case : str=0.1 ) -> List[Any]: """simple docstring""" lowercase__ : Any = self.normalize(_snake_case ,'''observations''' ) lowercase__ : Tuple = obs[None].repeat(_snake_case ,axis=0 ) lowercase__ : Dict = {0: self.to_torch(_snake_case )} lowercase__ : int = (batch_size, planning_horizon, self.state_dim + self.action_dim) # generate initial noise and apply our conditions (to make the trajectories start at 
current state) lowercase__ : Optional[int] = randn_tensor(_snake_case ,device=self.unet.device ) lowercase__ : Tuple = self.reset_xa(_snake_case ,_snake_case ,self.action_dim ) lowercase__ : str = self.to_torch(_snake_case ) # run the diffusion process lowercase__ , lowercase__ : int = self.run_diffusion(_snake_case ,_snake_case ,_snake_case ,_snake_case ) # sort output trajectories by value lowercase__ : Optional[Any] = y.argsort(0 ,descending=_snake_case ).squeeze() lowercase__ : str = x[sorted_idx] lowercase__ : str = sorted_values[:, :, : self.action_dim] lowercase__ : Optional[int] = actions.detach().cpu().numpy() lowercase__ : List[str] = self.de_normalize(_snake_case ,key='''actions''' ) # select the action with the highest value if y is not None: lowercase__ : str = 0 else: # if we didn't run value guiding, select a random action lowercase__ : str = np.random.randint(0 ,_snake_case ) lowercase__ : int = denorm_actions[selected_index, 0] return denorm_actions
302
"""simple docstring""" import tempfile import unittest from transformers import AutoModelForSeqaSeqLM, AutoTokenizer from transformers.testing_utils import ( is_torch_available, require_optimum, require_torch, slow, ) if is_torch_available(): import torch @require_torch @require_optimum @slow class __A ( unittest.TestCase ): '''simple docstring''' def UpperCAmelCase ( self : int ) -> str: """simple docstring""" lowercase__ : List[Any] = '''hf-internal-testing/tiny-random-t5''' lowercase__ : List[Any] = AutoTokenizer.from_pretrained(_snake_case ) lowercase__ : int = AutoModelForSeqaSeqLM.from_pretrained(_snake_case ) lowercase__ : str = tokenizer('''This is me''' ,return_tensors='''pt''' ) lowercase__ : Tuple = model.to_bettertransformer() self.assertTrue(any('''BetterTransformer''' in mod.__class__.__name__ for _, mod in model.named_modules() ) ) lowercase__ : Optional[int] = model.generate(**_snake_case ) lowercase__ : List[Any] = model.reverse_bettertransformer() self.assertFalse(any('''BetterTransformer''' in mod.__class__.__name__ for _, mod in model.named_modules() ) ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(_snake_case ) lowercase__ : Tuple = AutoModelForSeqaSeqLM.from_pretrained(_snake_case ) self.assertFalse( any('''BetterTransformer''' in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) ) lowercase__ : int = model_reloaded.generate(**_snake_case ) self.assertTrue(torch.allclose(_snake_case ,_snake_case ) ) def UpperCAmelCase ( self : List[str] ) -> Union[str, Any]: """simple docstring""" lowercase__ : List[str] = '''hf-internal-testing/tiny-random-t5''' lowercase__ : Optional[int] = AutoModelForSeqaSeqLM.from_pretrained(_snake_case ) lowercase__ : Union[str, Any] = model.to_bettertransformer() with tempfile.TemporaryDirectory() as tmpdirname: with self.assertRaises(_snake_case ): model.save_pretrained(_snake_case ) lowercase__ : int = model.reverse_bettertransformer() model.save_pretrained(_snake_case )
302
1
import json import multiprocessing import os import re from collections import defaultdict import torch from accelerate import Accelerator from accelerate.utils import set_seed from arguments import HumanEvalArguments from datasets import load_dataset, load_metric from torch.utils.data import IterableDataset from torch.utils.data.dataloader import DataLoader from tqdm import tqdm import transformers from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList _a = ['\nclass', '\ndef', '\n#', '\n@', '\nprint', '\nif'] class A_ ( snake_case_ ): def __init__( self : str , UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Tuple=None , UpperCAmelCase : Optional[Any]=1 ) -> Any: __lowerCAmelCase: Any = tokenizer __lowerCAmelCase: Optional[Any] = dataset __lowerCAmelCase: Optional[Any] = len(UpperCAmelCase ) if n_tasks is None else n_tasks __lowerCAmelCase: Any = n_copies def __iter__( self : Optional[int] ) -> Dict: __lowerCAmelCase: Tuple = [] for task in range(self.n_tasks ): # without strip, the model generate commented codes ... 
prompts.append(self.tokenizer.eos_token + self.dataset[task]['prompt'].strip() ) __lowerCAmelCase: List[str] = self.tokenizer(UpperCAmelCase , padding=UpperCAmelCase , return_tensors='pt' ) for task in range(self.n_tasks ): for _ in range(self.n_copies ): yield { "ids": outputs.input_ids[task], "task_id": task, "input_len": outputs.attention_mask[task].sum(), } class A_ ( snake_case_ ): def __init__( self : Any , UpperCAmelCase : Optional[int] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Any ) -> int: __lowerCAmelCase: int = start_length __lowerCAmelCase: str = eof_strings __lowerCAmelCase: int = tokenizer def __call__( self : Dict , UpperCAmelCase : Dict , UpperCAmelCase : Any , **UpperCAmelCase : Tuple ) -> Union[str, Any]: __lowerCAmelCase: Tuple = self.tokenizer.batch_decode(input_ids[:, self.start_length :] ) __lowerCAmelCase: Tuple = [] for decoded_generation in decoded_generations: done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) ) return all(UpperCAmelCase ) def _a ( SCREAMING_SNAKE_CASE : Tuple ) -> Union[str, Any]: """simple docstring""" __lowerCAmelCase: Tuple = re.split('(%s)' % '|'.join(__UpperCamelCase ) , __UpperCamelCase ) # last string should be "" return "".join(string_list[:-2] ) def _a ( SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : List[Any]=20 , **SCREAMING_SNAKE_CASE : Union[str, Any] ) -> List[Any]: """simple docstring""" __lowerCAmelCase: str = defaultdict(__UpperCamelCase ) # dict of list of generated tokens for step, batch in tqdm(enumerate(__UpperCamelCase ) ): with torch.no_grad(): __lowerCAmelCase: Optional[Any] = batch['ids'].shape[-1] __lowerCAmelCase: List[str] = accelerator.unwrap_model(__UpperCamelCase ).generate( input_ids=batch['ids'][:, : batch['input_len']] , num_return_sequences=__UpperCamelCase , **__UpperCamelCase ) # each task is 
generated batch_size times __lowerCAmelCase: List[str] = batch['task_id'].repeat(__UpperCamelCase ) __lowerCAmelCase: Union[str, Any] = accelerator.pad_across_processes( __UpperCamelCase , dim=1 , pad_index=tokenizer.pad_token_id ) __lowerCAmelCase , __lowerCAmelCase: Optional[Any] = accelerator.gather((generated_tokens, generated_tasks) ) __lowerCAmelCase: Optional[int] = generated_tokens.cpu().numpy() __lowerCAmelCase: int = generated_tasks.cpu().numpy() for task, generated_tokens in zip(__UpperCamelCase , __UpperCamelCase ): gen_token_dict[task].append(__UpperCamelCase ) __lowerCAmelCase: Union[str, Any] = [[] for _ in range(__UpperCamelCase )] for task, generated_tokens in gen_token_dict.items(): for s in generated_tokens: __lowerCAmelCase: Union[str, Any] = tokenizer.decode(__UpperCamelCase , skip_special_tokens=__UpperCamelCase , clean_up_tokenization_spaces=__UpperCamelCase ) code_gens[task].append(remove_last_block(__UpperCamelCase ) ) return code_gens def _a ( ) -> List[str]: """simple docstring""" __lowerCAmelCase: int = HfArgumentParser(__UpperCamelCase ) __lowerCAmelCase: List[str] = parser.parse_args() transformers.logging.set_verbosity_error() # enables code execution in code_eval metric __lowerCAmelCase: List[Any] = args.HF_ALLOW_CODE_EVAL # make sure tokenizer plays nice with multiprocessing __lowerCAmelCase: int = 'false' if args.num_workers is None: __lowerCAmelCase: str = multiprocessing.cpu_count() # Use dataset load to feed to accelerate __lowerCAmelCase: Tuple = Accelerator() set_seed(args.seed , device_specific=__UpperCamelCase ) # Load model and tokenizer __lowerCAmelCase: Optional[Any] = AutoTokenizer.from_pretrained(args.model_ckpt ) __lowerCAmelCase: Any = tokenizer.eos_token __lowerCAmelCase: str = AutoModelForCausalLM.from_pretrained(args.model_ckpt ) # Generation settings __lowerCAmelCase: List[str] = { 'do_sample': args.do_sample, 'temperature': args.temperature, 'max_new_tokens': args.max_new_tokens, 'top_p': args.top_p, 'top_k': 
args.top_k, 'stopping_criteria': StoppingCriteriaList([EndOfFunctionCriteria(0 , __UpperCamelCase , __UpperCamelCase )] ), } # Load evaluation dataset and metric __lowerCAmelCase: Tuple = load_dataset('openai_humaneval' ) __lowerCAmelCase: str = load_metric('code_eval' ) __lowerCAmelCase: Optional[int] = args.num_tasks if args.num_tasks is not None else len(human_eval['test'] ) __lowerCAmelCase: Optional[int] = args.n_samples // args.batch_size __lowerCAmelCase: Dict = TokenizedDataset(__UpperCamelCase , human_eval['test'] , n_copies=__UpperCamelCase , n_tasks=__UpperCamelCase ) # do not confuse args.batch_size, which is actually the num_return_sequences __lowerCAmelCase: int = DataLoader(__UpperCamelCase , batch_size=1 ) # Run a quick test to see if code evaluation is enabled try: __lowerCAmelCase: Optional[Any] = code_eval_metric.compute(references=[''] , predictions=[['']] ) except ValueError as exception: print( 'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL=\"1\"`' ' flag to enable code evaluation.' 
) raise exception __lowerCAmelCase , __lowerCAmelCase: Any = accelerator.prepare(__UpperCamelCase , __UpperCamelCase ) __lowerCAmelCase: Tuple = complete_code( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , n_tasks=__UpperCamelCase , batch_size=args.batch_size , **__UpperCamelCase , ) if accelerator.is_main_process: __lowerCAmelCase: Any = [] for task in tqdm(range(__UpperCamelCase ) ): __lowerCAmelCase: int = human_eval['test'][task]['test'] __lowerCAmelCase: int = f'''check({human_eval['test'][task]['entry_point']})''' references.append('\n' + test_func + '\n' + entry_point ) # Evaluate completions with "code_eval" metric __lowerCAmelCase , __lowerCAmelCase: Union[str, Any] = code_eval_metric.compute( references=__UpperCamelCase , predictions=__UpperCamelCase , num_workers=args.num_workers ) print(f'''Results: {pass_at_k}''' ) # Save results to json file with open(args.output_file , 'w' ) as fp: json.dump(__UpperCamelCase , __UpperCamelCase ) # For some reason the folliwng seems to be necessary sometimes for code_eval to work nice with multiprocessing # https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script if __name__ == "__main__": main()
322
import functools
from typing import Any

# Exported explicitly: the leading double underscore would otherwise be
# hidden from `from module import *`.
__all__ = ["__snake_case"]


def __snake_case(string: str, words: list[str]) -> bool:
    """Return True if `string` can be segmented into a sequence of `words`.

    Uses a trie over `words` plus memoized top-down DP over start indices.

    Args:
        string: non-empty string to segment.
        words: list of non-empty candidate words (reuse allowed).

    Raises:
        ValueError: if `string` is empty/not a str, or `words` is not a
            list of non-empty strings.
    """
    if not isinstance(string, str) or len(string) == 0:
        raise ValueError("the string should be not empty string")

    if not isinstance(words, list) or not all(
        isinstance(item, str) and len(item) > 0 for item in words
    ):
        raise ValueError("the words should be a list of non-empty strings")

    # Build trie; a node maps next-char -> child node, and carries the
    # sentinel key when a complete word ends at that node.
    trie: dict[str, Any] = {}
    word_keeper_key = "WORD_KEEPER"

    for word in words:
        trie_node = trie
        for c in word:
            if c not in trie_node:
                trie_node[c] = {}
            trie_node = trie_node[c]
        trie_node[word_keeper_key] = True

    len_string = len(string)

    # Dynamic programming method: memoize breakability per start index.
    @functools.cache
    def is_breakable(index: int) -> bool:
        """True if string[index:] can be segmented into words."""
        if index == len_string:
            return True

        trie_node = trie
        for i in range(index, len_string):
            trie_node = trie_node.get(string[i], None)
            if trie_node is None:
                return False
            # A word ends here; try to break the remainder.
            if trie_node.get(word_keeper_key, False) and is_breakable(i + 1):
                return True
        return False

    return is_breakable(0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
312
0
# Imports
import numpy as np


class lowerCamelCase__:
    """Compute common remote-sensing vegetation indices from band matrices.

    Bands (numpy arrays of identical shape): red, green, blue, red_edge, nir.
    Any band not supplied stays unset; each index method only reads the
    bands its formula needs.
    """

    def __init__(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)

    def set_matricies(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        """Store any bands that were supplied; leave the rest untouched."""
        if red is not None:
            self.red = red
        if green is not None:
            self.green = green
        if blue is not None:
            self.blue = blue
        if red_edge is not None:
            self.redEdge = red_edge
        if nir is not None:
            self.nir = nir
        return True

    def calculation(self, index="", red=None, green=None, blue=None, red_edge=None, nir=None):
        """Dispatch an index by name (e.g. "NDVI"); returns False on unknown name."""
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)
        funcs = {
            "ARVI2": self.arvaa,
            "CCCI": self.ccci,
            "CVI": self.cvi,
            "GLI": self.gli,
            "NDVI": self.ndvi,
            "BNDVI": self.bndvi,
            "redEdgeNDVI": self.red_edge_ndvi,
            "GNDVI": self.gndvi,
            "GBNDVI": self.gbndvi,
            "GRNDVI": self.grndvi,
            "RBNDVI": self.rbndvi,
            "PNDVI": self.pndvi,
            "ATSAVI": self.atsavi,
            "BWDRVI": self.bwdrvi,
            "CIgreen": self.ci_green,
            "CIrededge": self.ci_rededge,
            "CI": self.ci,
            "CTVI": self.ctvi,
            "GDVI": self.gdvi,
            "EVI": self.evi,
            "GEMI": self.gemi,
            "GOSAVI": self.gosavi,
            "GSAVI": self.gsavi,
            "Hue": self.hue,
            "IVI": self.ivi,
            "IPVI": self.ipvi,
            "I": self.i,
            "RVI": self.rvi,
            "MRVI": self.mrvi,
            "MSAVI": self.m_savi,
            "NormG": self.norm_g,
            "NormNIR": self.norm_nir,
            "NormR": self.norm_r,
            "NGRDI": self.ngrdi,
            "RI": self.ri,
            "S": self.s,
            "IF": self._if,
            "DVI": self.dvi,
            "TVI": self.tvi,
            "NDRE": self.ndre,
        }
        try:
            return funcs[index]()
        except KeyError:
            print("Index not in the list!")
            return False

    def arvaa(self):
        """Atmospherically Resistant Vegetation Index 2."""
        return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))

    def ccci(self):
        """Canopy Chlorophyll Content Index."""
        return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
            (self.nir - self.red) / (self.nir + self.red)
        )

    def cvi(self):
        """Chlorophyll Vegetation Index."""
        return self.nir * (self.red / (self.green**2))

    def gli(self):
        """Green Leaf Index."""
        return (2 * self.green - self.red - self.blue) / (
            2 * self.green + self.red + self.blue
        )

    def ndvi(self):
        """Normalized Difference Vegetation Index."""
        return (self.nir - self.red) / (self.nir + self.red)

    def bndvi(self):
        """Blue-band NDVI."""
        return (self.nir - self.blue) / (self.nir + self.blue)

    def red_edge_ndvi(self):
        """Red-edge NDVI."""
        return (self.redEdge - self.red) / (self.redEdge + self.red)

    def gndvi(self):
        """Green-band NDVI."""
        return (self.nir - self.green) / (self.nir + self.green)

    def gbndvi(self):
        """Green-Blue NDVI."""
        return (self.nir - (self.green + self.blue)) / (
            self.nir + (self.green + self.blue)
        )

    def grndvi(self):
        """Green-Red NDVI."""
        return (self.nir - (self.green + self.red)) / (
            self.nir + (self.green + self.red)
        )

    def rbndvi(self):
        """Red-Blue NDVI."""
        return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))

    def pndvi(self):
        """Pan NDVI."""
        return (self.nir - (self.green + self.red + self.blue)) / (
            self.nir + (self.green + self.red + self.blue)
        )

    def atsavi(self, x=0.08, a=1.22, b=0.03):
        """Adjusted Transformed Soil-Adjusted VI (soil-line slope a, intercept b)."""
        return a * (
            (self.nir - a * self.red - b) / (a * self.nir + self.red - a * b + x * (1 + a**2))
        )

    def bwdrvi(self):
        """Blue Wide Dynamic Range Vegetation Index."""
        return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)

    def ci_green(self):
        """Chlorophyll Index (green band)."""
        return (self.nir / self.green) - 1

    def ci_rededge(self):
        """Chlorophyll Index (red-edge band)."""
        return (self.nir / self.redEdge) - 1

    def ci(self):
        """Coloration Index."""
        return (self.red - self.blue) / self.red

    def ctvi(self):
        """Corrected Transformed Vegetation Index."""
        ndvi = self.ndvi()
        return ((ndvi + 0.5) / (abs(ndvi + 0.5))) * (abs(ndvi + 0.5) ** (1 / 2))

    def gdvi(self):
        """Green Difference Vegetation Index."""
        return self.nir - self.green

    def evi(self):
        """Enhanced Vegetation Index."""
        return 2.5 * (
            (self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
        )

    def gemi(self):
        """Global Environment Monitoring Index."""
        n = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
            self.nir + self.red + 0.5
        )
        return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red)

    def gosavi(self, y=0.16):
        """Green Optimized Soil-Adjusted VI (soil adjustment y)."""
        return (self.nir - self.green) / (self.nir + self.green + y)

    def gsavi(self, n=0.5):
        """Green Soil-Adjusted VI (soil adjustment n)."""
        return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)

    def hue(self):
        """Hue index."""
        return np.arctan(
            ((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue)
        )

    def ivi(self, a=None, b=None):
        """Ideal Vegetation Index (intercept b, slope a of the soil line)."""
        return (self.nir - b) / (a * self.red)

    def ipvi(self):
        """Infrared Percentage Vegetation Index."""
        return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)

    def i(self):
        """Intensity."""
        return (self.red + self.green + self.blue) / 30.5

    def rvi(self):
        """Ratio Vegetation Index."""
        return self.nir / self.red

    def mrvi(self):
        """Modified Ratio Vegetation Index."""
        return (self.rvi() - 1) / (self.rvi() + 1)

    def m_savi(self):
        """Modified Soil-Adjusted Vegetation Index."""
        return (
            (2 * self.nir + 1)
            - ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
        ) / 2

    def norm_g(self):
        """Normalized green."""
        return self.green / (self.nir + self.red + self.green)

    def norm_nir(self):
        """Normalized NIR."""
        return self.nir / (self.nir + self.red + self.green)

    def norm_r(self):
        """Normalized red."""
        return self.red / (self.nir + self.red + self.green)

    def ngrdi(self):
        """Normalized Green-Red Difference Index."""
        return (self.green - self.red) / (self.green + self.red)

    def ri(self):
        """Redness Index."""
        return (self.red - self.green) / (self.red + self.green)

    def s(self):
        """Saturation across the RGB bands."""
        max_value = np.max([np.max(self.red), np.max(self.green), np.max(self.blue)])
        min_value = np.min([np.min(self.red), np.min(self.green), np.min(self.blue)])
        return (max_value - min_value) / max_value

    def _if(self):
        """Shape index (IF)."""
        return (2 * self.red - self.green - self.blue) / (self.green - self.blue)

    def dvi(self):
        """Simple NIR/red ratio (DVI as defined here)."""
        return self.nir / self.red

    def tvi(self):
        """Transformed Vegetation Index."""
        return (self.ndvi() + 0.5) ** (1 / 2)

    def ndre(self):
        """Normalized Difference Red-Edge index."""
        return (self.nir - self.redEdge) / (self.nir + self.redEdge)
362
import random
import unittest

import torch

from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device

from ..pipeline_params import (
    TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin


@skip_mps
class lowerCamelCase__(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    """Fast tests for the DeepFloyd IF inpainting pipeline."""

    # Attribute names are the hooks PipelineTesterMixin reads.
    pipeline_class = IFInpaintingPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        # Shared IF component factory from IFPipelineTesterMixin.
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        """Build a tiny, deterministic input dict for the pipeline."""
        if str(device).startswith("mps"):
            # MPS does not support device-bound generators.
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2,
        )
29
0
"""simple docstring""" import json import os from typing import Optional, Tuple import regex as re from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = { 'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', } lowerCAmelCase_ = { 'vocab_file': {'ctrl': 'https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json'}, 'merges_file': {'ctrl': 'https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt'}, } lowerCAmelCase_ = { 'ctrl': 256, } lowerCAmelCase_ = { 'Pregnancy': 168_629, 'Christianity': 7_675, 'Explain': 106_423, 'Fitness': 63_440, 'Saving': 63_163, 'Ask': 27_171, 'Ass': 95_985, 'Joke': 163_509, 'Questions': 45_622, 'Thoughts': 49_605, 'Retail': 52_342, 'Feminism': 164_338, 'Writing': 11_992, 'Atheism': 192_263, 'Netflix': 48_616, 'Computing': 39_639, 'Opinion': 43_213, 'Alone': 44_967, 'Funny': 58_917, 'Gaming': 40_358, 'Human': 4_088, 'India': 1_331, 'Joker': 77_138, 'Diet': 36_206, 'Legal': 11_859, 'Norman': 4_939, 'Tip': 72_689, 'Weight': 52_343, 'Movies': 46_273, 'Running': 23_425, 'Science': 2_090, 'Horror': 37_793, 'Confession': 60_572, 'Finance': 12_250, 'Politics': 16_360, 'Scary': 191_985, 'Support': 12_654, 'Technologies': 32_516, 'Teenage': 66_160, 'Event': 32_769, 'Learned': 67_460, 'Notion': 182_770, 'Wikipedia': 37_583, 'Books': 6_665, 'Extract': 76_050, 'Confessions': 102_701, 'Conspiracy': 75_932, 'Links': 63_674, 'Narcissus': 150_425, 'Relationship': 54_766, 'Relationships': 134_796, 'Reviews': 41_671, 'News': 4_256, 'Translation': 26_820, 'multilingual': 128_406, } def __UpperCAmelCase ( __lowerCamelCase ) -> Union[str, Any]: lowercase__ : Dict = set() lowercase__ : Optional[int] = word[0] for char in word[1:]: pairs.add((prev_char, char) ) lowercase__ : Dict = char lowercase__ : Tuple = set(__lowerCamelCase ) return pairs class __A ( A_ ): '''simple docstring''' lowerCAmelCase : List[str] = VOCAB_FILES_NAMES 
lowerCAmelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCAmelCase : Optional[Any] = CONTROL_CODES def __init__( self : int ,_snake_case : str ,_snake_case : Tuple ,_snake_case : List[str]="<unk>" ,**_snake_case : List[str] ) -> Tuple: """simple docstring""" super().__init__(unk_token=_snake_case ,**_snake_case ) with open(_snake_case ,encoding='''utf-8''' ) as vocab_handle: lowercase__ : Dict = json.load(_snake_case ) lowercase__ : str = {v: k for k, v in self.encoder.items()} with open(_snake_case ,encoding='''utf-8''' ) as merges_handle: lowercase__ : Union[str, Any] = merges_handle.read().split('''\n''' )[1:-1] lowercase__ : List[Any] = [tuple(merge.split() ) for merge in merges] lowercase__ : List[str] = dict(zip(_snake_case ,range(len(_snake_case ) ) ) ) lowercase__ : int = {} @property def UpperCAmelCase ( self : Optional[Any] ) -> List[Any]: """simple docstring""" return len(self.encoder ) def UpperCAmelCase ( self : Optional[int] ) -> Union[str, Any]: """simple docstring""" return dict(self.encoder ,**self.added_tokens_encoder ) def UpperCAmelCase ( self : Any ,_snake_case : Union[str, Any] ) -> Tuple: """simple docstring""" if token in self.cache: return self.cache[token] lowercase__ : str = tuple(_snake_case ) lowercase__ : Dict = tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] ) lowercase__ : Any = get_pairs(_snake_case ) if not pairs: return token while True: lowercase__ : Dict = min(_snake_case ,key=lambda _snake_case : self.bpe_ranks.get(_snake_case ,float('''inf''' ) ) ) if bigram not in self.bpe_ranks: break lowercase__ , lowercase__ : Any = bigram lowercase__ : Tuple = [] lowercase__ : Any = 0 while i < len(_snake_case ): try: lowercase__ : Optional[Any] = word.index(_snake_case ,_snake_case ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) lowercase__ : int = j if word[i] == first and i < len(_snake_case ) - 1 and word[i + 1] == second: 
new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 lowercase__ : Tuple = tuple(_snake_case ) lowercase__ : int = new_word if len(_snake_case ) == 1: break else: lowercase__ : Optional[Any] = get_pairs(_snake_case ) lowercase__ : Union[str, Any] = '''@@ '''.join(_snake_case ) lowercase__ : List[Any] = word[:-4] lowercase__ : int = word return word def UpperCAmelCase ( self : Optional[Any] ,_snake_case : Optional[int] ) -> Optional[int]: """simple docstring""" lowercase__ : List[str] = [] lowercase__ : int = re.findall(r'''\S+\n?''' ,_snake_case ) for token in words: split_tokens.extend(list(self.bpe(_snake_case ).split(''' ''' ) ) ) return split_tokens def UpperCAmelCase ( self : List[Any] ,_snake_case : List[Any] ) -> Tuple: """simple docstring""" return self.encoder.get(_snake_case ,self.encoder.get(self.unk_token ) ) def UpperCAmelCase ( self : Optional[int] ,_snake_case : str ) -> Optional[Any]: """simple docstring""" return self.decoder.get(_snake_case ,self.unk_token ) def UpperCAmelCase ( self : List[str] ,_snake_case : str ) -> Any: """simple docstring""" lowercase__ : List[str] = ''' '''.join(_snake_case ).replace('''@@ ''' ,'''''' ).strip() return out_string def UpperCAmelCase ( self : Dict ,_snake_case : str ,_snake_case : Optional[str] = None ) -> Tuple[str]: """simple docstring""" if not os.path.isdir(_snake_case ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return lowercase__ : int = os.path.join( _snake_case ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) lowercase__ : Dict = os.path.join( _snake_case ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] ) with open(_snake_case ,'''w''' ,encoding='''utf-8''' ) as f: f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=_snake_case ,ensure_ascii=_snake_case ) + '''\n''' ) lowercase__ : Optional[int] = 0 with open(_snake_case ,'''w''' 
,encoding='''utf-8''' ) as writer: writer.write('''#version: 0.2\n''' ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda _snake_case : kv[1] ): if index != token_index: logger.warning( f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.""" ''' Please check that the tokenizer is not corrupted!''' ) lowercase__ : List[Any] = token_index writer.write(''' '''.join(_snake_case ) + '''\n''' ) index += 1 return vocab_file, merge_file # def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True): # filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)) # tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens) # tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far) # return ''.join(tokens_generated_so_far)
16
"""simple docstring""" import argparse import torch from transformers import YosoConfig, YosoForMaskedLM def __UpperCAmelCase ( __lowerCamelCase ) -> Optional[int]: if "model" in orig_key: lowercase__ : Tuple = orig_key.replace('''model.''' , '''''' ) if "norm1" in orig_key: lowercase__ : List[str] = orig_key.replace('''norm1''' , '''attention.output.LayerNorm''' ) if "norm2" in orig_key: lowercase__ : List[str] = orig_key.replace('''norm2''' , '''output.LayerNorm''' ) if "norm" in orig_key: lowercase__ : List[str] = orig_key.replace('''norm''' , '''LayerNorm''' ) if "transformer" in orig_key: lowercase__ : Union[str, Any] = orig_key.split('''.''' )[0].split('''_''' )[-1] lowercase__ : List[str] = orig_key.replace(f"""transformer_{layer_num}""" , f"""encoder.layer.{layer_num}""" ) if "mha.attn" in orig_key: lowercase__ : Union[str, Any] = orig_key.replace('''mha.attn''' , '''attention.self''' ) if "mha" in orig_key: lowercase__ : str = orig_key.replace('''mha''' , '''attention''' ) if "W_q" in orig_key: lowercase__ : Any = orig_key.replace('''W_q''' , '''self.query''' ) if "W_k" in orig_key: lowercase__ : List[Any] = orig_key.replace('''W_k''' , '''self.key''' ) if "W_v" in orig_key: lowercase__ : Any = orig_key.replace('''W_v''' , '''self.value''' ) if "ff1" in orig_key: lowercase__ : Optional[int] = orig_key.replace('''ff1''' , '''intermediate.dense''' ) if "ff2" in orig_key: lowercase__ : Optional[Any] = orig_key.replace('''ff2''' , '''output.dense''' ) if "ff" in orig_key: lowercase__ : List[str] = orig_key.replace('''ff''' , '''output.dense''' ) if "mlm_class" in orig_key: lowercase__ : int = orig_key.replace('''mlm.mlm_class''' , '''cls.predictions.decoder''' ) if "mlm" in orig_key: lowercase__ : Optional[Any] = orig_key.replace('''mlm''' , '''cls.predictions.transform''' ) if "cls" not in orig_key: lowercase__ : Optional[Any] = '''yoso.''' + orig_key return orig_key def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> Optional[int]: for key in 
orig_state_dict.copy().keys(): lowercase__ : Optional[Any] = orig_state_dict.pop(__lowerCamelCase ) if ("pooler" in key) or ("sen_class" in key): continue else: lowercase__ : Tuple = val lowercase__ : Union[str, Any] = orig_state_dict['''cls.predictions.decoder.bias'''] lowercase__ : List[str] = torch.arange(__lowerCamelCase ).expand((1, -1) ) + 2 return orig_state_dict def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Optional[Any]: lowercase__ : Tuple = torch.load(__lowerCamelCase , map_location='''cpu''' )['''model_state_dict'''] lowercase__ : List[Any] = YosoConfig.from_json_file(__lowerCamelCase ) lowercase__ : List[Any] = YosoForMaskedLM(__lowerCamelCase ) lowercase__ : Optional[Any] = convert_checkpoint_helper(config.max_position_embeddings , __lowerCamelCase ) print(model.load_state_dict(__lowerCamelCase ) ) model.eval() model.save_pretrained(__lowerCamelCase ) print(f"""Checkpoint successfuly converted. Model saved at {pytorch_dump_path}""" ) if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '--pytorch_model_path', default=None, type=str, required=True, help='Path to YOSO pytorch checkpoint.' ) parser.add_argument( '--config_file', default=None, type=str, required=True, help='The json file for YOSO model config.', ) parser.add_argument( '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) lowerCAmelCase_ = parser.parse_args() convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
16
1
"""simple docstring""" from typing import Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING SCREAMING_SNAKE_CASE_ : List[Any] = logging.get_logger(__name__) @add_end_docstrings(_lowerCamelCase ) class a ( _lowerCamelCase ): """simple docstring""" def __init__( self: Optional[int] , *UpperCamelCase: Any , **UpperCamelCase: List[Any] ): """simple docstring""" super().__init__(*UpperCamelCase , **UpperCamelCase ) self.check_model_type(UpperCamelCase ) def UpperCamelCase ( self: Optional[int] , UpperCamelCase: Any=None , UpperCamelCase: str=None , UpperCamelCase: Union[str, Any]=None , **UpperCamelCase: Union[str, Any] ): """simple docstring""" A__ , A__ = {}, {} if padding is not None: A__ = padding if truncation is not None: A__ = truncation if top_k is not None: A__ = top_k return preprocess_params, {}, postprocess_params def __call__( self: List[Any] , UpperCamelCase: Union["Image.Image", str] , UpperCamelCase: str = None , **UpperCamelCase: Dict ): """simple docstring""" if isinstance(UpperCamelCase , (Image.Image, str) ) and isinstance(UpperCamelCase , UpperCamelCase ): A__ = {"""image""": image, """question""": question} else: A__ = image A__ = super().__call__(UpperCamelCase , **UpperCamelCase ) return results def UpperCamelCase ( self: List[str] , UpperCamelCase: Optional[int] , UpperCamelCase: Optional[Any]=False , UpperCamelCase: Any=False ): """simple docstring""" A__ = load_image(inputs["""image"""] ) A__ = self.tokenizer( inputs["""question"""] , return_tensors=self.framework , padding=UpperCamelCase , truncation=UpperCamelCase ) A__ = self.image_processor(images=UpperCamelCase , return_tensors=self.framework ) model_inputs.update(UpperCamelCase ) return model_inputs def 
UpperCamelCase ( self: Union[str, Any] , UpperCamelCase: Tuple ): """simple docstring""" A__ = self.model(**UpperCamelCase ) return model_outputs def UpperCamelCase ( self: str , UpperCamelCase: Optional[Any] , UpperCamelCase: Tuple=5 ): """simple docstring""" if top_k > self.model.config.num_labels: A__ = self.model.config.num_labels if self.framework == "pt": A__ = model_outputs.logits.sigmoid()[0] A__ , A__ = probs.topk(UpperCamelCase ) else: raise ValueError(f"""Unsupported framework: {self.framework}""" ) A__ = scores.tolist() A__ = ids.tolist() return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(UpperCamelCase , UpperCamelCase )]
364
"""simple docstring""" import copy import inspect import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import VideoMAEConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEForPreTraining, VideoMAEForVideoClassification, VideoMAEModel, ) from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from transformers import VideoMAEImageProcessor class a : """simple docstring""" def __init__( self: Any , UpperCamelCase: List[str] , UpperCamelCase: Optional[Any]=13 , UpperCamelCase: str=10 , UpperCamelCase: Dict=3 , UpperCamelCase: Any=2 , UpperCamelCase: str=2 , UpperCamelCase: Any=2 , UpperCamelCase: Union[str, Any]=True , UpperCamelCase: Any=True , UpperCamelCase: Dict=32 , UpperCamelCase: Optional[int]=5 , UpperCamelCase: Tuple=4 , UpperCamelCase: Optional[int]=37 , UpperCamelCase: Dict="gelu" , UpperCamelCase: Optional[int]=0.1 , UpperCamelCase: Dict=0.1 , UpperCamelCase: Union[str, Any]=10 , UpperCamelCase: List[Any]=0.02 , UpperCamelCase: str=0.9 , UpperCamelCase: Any=None , ): """simple docstring""" A__ = parent A__ = batch_size A__ = image_size A__ = num_channels A__ = patch_size A__ = tubelet_size A__ = num_frames A__ = is_training A__ = use_labels A__ = hidden_size A__ = num_hidden_layers A__ = num_attention_heads A__ = intermediate_size A__ = hidden_act A__ = hidden_dropout_prob A__ = attention_probs_dropout_prob A__ = type_sequence_label_size A__ = 
initializer_range A__ = mask_ratio A__ = scope # in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame A__ = (image_size // patch_size) ** 2 A__ = (num_frames // tubelet_size) * self.num_patches_per_frame # use this variable to define bool_masked_pos A__ = int(mask_ratio * self.seq_length ) def UpperCamelCase ( self: Optional[Any] ): """simple docstring""" A__ = floats_tensor( [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] ) A__ = None if self.use_labels: A__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A__ = self.get_config() return config, pixel_values, labels def UpperCamelCase ( self: Optional[int] ): """simple docstring""" return VideoMAEConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , tubelet_size=self.tubelet_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase , initializer_range=self.initializer_range , ) def UpperCamelCase ( self: Any , UpperCamelCase: Dict , UpperCamelCase: Tuple , UpperCamelCase: Tuple ): """simple docstring""" A__ = VideoMAEModel(config=UpperCamelCase ) model.to(UpperCamelCase ) model.eval() A__ = model(UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCamelCase ( self: List[str] , UpperCamelCase: Optional[Any] , UpperCamelCase: Union[str, Any] , UpperCamelCase: List[Any] ): """simple docstring""" A__ = VideoMAEForPreTraining(UpperCamelCase ) model.to(UpperCamelCase ) model.eval() # important: each video needs to have the same number of masked patches # hence we define a single mask, which we then 
repeat for each example in the batch A__ = torch.ones((self.num_masks,) ) A__ = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0 ) )] ) A__ = mask.expand(self.batch_size , -1 ).bool() A__ = model(UpperCamelCase , UpperCamelCase ) # model only returns predictions for masked patches A__ = mask.sum().item() A__ = 3 * self.tubelet_size * self.patch_size**2 self.parent.assertEqual(result.logits.shape , (self.batch_size, num_masked_patches, decoder_num_labels) ) def UpperCamelCase ( self: Union[str, Any] ): """simple docstring""" A__ = self.prepare_config_and_inputs() A__ , A__ , A__ = config_and_inputs A__ = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class a ( _lowerCamelCase, _lowerCamelCase, unittest.TestCase ): """simple docstring""" UpperCAmelCase = ( (VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else () ) UpperCAmelCase = ( {"feature-extraction": VideoMAEModel, "video-classification": VideoMAEForVideoClassification} if is_torch_available() else {} ) UpperCAmelCase = False UpperCAmelCase = False UpperCAmelCase = False UpperCAmelCase = False def UpperCamelCase ( self: List[str] ): """simple docstring""" A__ = VideoMAEModelTester(self ) A__ = ConfigTester(self , config_class=UpperCamelCase , has_text_modality=UpperCamelCase , hidden_size=37 ) def UpperCamelCase ( self: str , UpperCamelCase: Optional[int] , UpperCamelCase: Dict , UpperCamelCase: Union[str, Any]=False ): """simple docstring""" A__ = copy.deepcopy(UpperCamelCase ) if model_class == VideoMAEForPreTraining: # important: each video needs to have the same number of masked patches # hence we define a single mask, which we then repeat for each example in the batch A__ = torch.ones((self.model_tester.num_masks,) ) A__ = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0 ) )] ) A__ = mask.expand(self.model_tester.batch_size , -1 ).bool() A__ = bool_masked_pos.to(UpperCamelCase ) if return_labels: if 
model_class in [ *get_values(UpperCamelCase ), ]: A__ = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=UpperCamelCase ) return inputs_dict def UpperCamelCase ( self: List[str] ): """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason="""VideoMAE does not use inputs_embeds""" ) def UpperCamelCase ( self: Dict ): """simple docstring""" pass def UpperCamelCase ( self: Tuple ): """simple docstring""" A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A__ = model_class(UpperCamelCase ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) A__ = model.get_output_embeddings() self.assertTrue(x is None or isinstance(UpperCamelCase , nn.Linear ) ) def UpperCamelCase ( self: List[Any] ): """simple docstring""" A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A__ = model_class(UpperCamelCase ) A__ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A__ = [*signature.parameters.keys()] A__ = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , UpperCamelCase ) def UpperCamelCase ( self: List[str] ): """simple docstring""" A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase ) def UpperCamelCase ( self: Union[str, Any] ): """simple docstring""" A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*UpperCamelCase ) @slow def UpperCamelCase ( self: Tuple ): """simple docstring""" for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A__ = VideoMAEModel.from_pretrained(UpperCamelCase ) self.assertIsNotNone(UpperCamelCase ) def UpperCamelCase ( self: Tuple ): """simple docstring""" if not self.has_attentions: pass else: A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() A__ = True for model_class in 
self.all_model_classes: A__ = self.model_tester.seq_length - self.model_tester.num_masks A__ = ( num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length ) A__ = True A__ = False A__ = True A__ = model_class(UpperCamelCase ) model.to(UpperCamelCase ) model.eval() with torch.no_grad(): A__ = model(**self._prepare_for_class(UpperCamelCase , UpperCamelCase ) ) A__ = outputs.attentions self.assertEqual(len(UpperCamelCase ) , self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del inputs_dict["output_attentions"] A__ = True A__ = model_class(UpperCamelCase ) model.to(UpperCamelCase ) model.eval() with torch.no_grad(): A__ = model(**self._prepare_for_class(UpperCamelCase , UpperCamelCase ) ) A__ = outputs.attentions self.assertEqual(len(UpperCamelCase ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , ) A__ = len(UpperCamelCase ) # Check attention is always last and order is fine A__ = True A__ = True A__ = model_class(UpperCamelCase ) model.to(UpperCamelCase ) model.eval() with torch.no_grad(): A__ = model(**self._prepare_for_class(UpperCamelCase , UpperCamelCase ) ) self.assertEqual(out_len + 1 , len(UpperCamelCase ) ) A__ = outputs.attentions self.assertEqual(len(UpperCamelCase ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , ) def UpperCamelCase ( self: Optional[int] ): """simple docstring""" def check_hidden_states_output(UpperCamelCase: Tuple , UpperCamelCase: Optional[Any] , UpperCamelCase: List[str] ): A__ = model_class(UpperCamelCase ) model.to(UpperCamelCase ) model.eval() with torch.no_grad(): A__ = model(**self._prepare_for_class(UpperCamelCase , UpperCamelCase ) ) A__ = outputs.hidden_states A__ = self.model_tester.num_hidden_layers + 1 
self.assertEqual(len(UpperCamelCase ) , UpperCamelCase ) A__ = self.model_tester.seq_length - self.model_tester.num_masks A__ = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , ) A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A__ = True check_hidden_states_output(UpperCamelCase , UpperCamelCase , UpperCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] A__ = True check_hidden_states_output(UpperCamelCase , UpperCamelCase , UpperCamelCase ) @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def UpperCamelCase ( self: Optional[Any] ): """simple docstring""" pass def _snake_case ( ): A__ = hf_hub_download( repo_id="""hf-internal-testing/spaghetti-video""" , filename="""eating_spaghetti.npy""" , repo_type="""dataset""" ) A__ = np.load(UpperCAmelCase_ ) return list(UpperCAmelCase_ ) @require_torch @require_vision class a ( unittest.TestCase ): """simple docstring""" @cached_property def UpperCamelCase ( self: Tuple ): """simple docstring""" return ( VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] ) if is_vision_available() else None ) @slow def UpperCamelCase ( self: Dict ): """simple docstring""" A__ = VideoMAEForVideoClassification.from_pretrained("""MCG-NJU/videomae-base-finetuned-kinetics""" ).to( UpperCamelCase ) A__ = self.default_image_processor A__ = prepare_video() A__ = image_processor(UpperCamelCase , return_tensors="""pt""" ).to(UpperCamelCase ) # forward pass with torch.no_grad(): A__ = model(**UpperCamelCase ) # verify the logits A__ = torch.Size((1, 4_00) ) self.assertEqual(outputs.logits.shape , UpperCamelCase ) A__ = torch.tensor([0.3_669, -0.0_688, -0.2_421] ).to(UpperCamelCase ) 
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase , atol=1e-4 ) ) @slow def UpperCamelCase ( self: Optional[int] ): """simple docstring""" A__ = VideoMAEForPreTraining.from_pretrained("""MCG-NJU/videomae-base-short""" ).to(UpperCamelCase ) A__ = self.default_image_processor A__ = prepare_video() A__ = image_processor(UpperCamelCase , return_tensors="""pt""" ).to(UpperCamelCase ) # add boolean mask, indicating which patches to mask A__ = hf_hub_download(repo_id="""hf-internal-testing/bool-masked-pos""" , filename="""bool_masked_pos.pt""" ) A__ = torch.load(UpperCamelCase ) # forward pass with torch.no_grad(): A__ = model(**UpperCamelCase ) # verify the logits A__ = torch.Size([1, 14_08, 15_36] ) A__ = torch.tensor( [[0.7_994, 0.9_612, 0.8_508], [0.7_401, 0.8_958, 0.8_302], [0.5_862, 0.7_468, 0.7_325]] , device=UpperCamelCase ) self.assertEqual(outputs.logits.shape , UpperCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , UpperCamelCase , atol=1e-4 ) ) # verify the loss (`config.norm_pix_loss` = `True`) A__ = torch.tensor([0.5_142] , device=UpperCamelCase ) self.assertTrue(torch.allclose(outputs.loss , UpperCamelCase , atol=1e-4 ) ) # verify the loss (`config.norm_pix_loss` = `False`) A__ = VideoMAEForPreTraining.from_pretrained("""MCG-NJU/videomae-base-short""" , norm_pix_loss=UpperCamelCase ).to( UpperCamelCase ) with torch.no_grad(): A__ = model(**UpperCamelCase ) A__ = torch.tensor(torch.tensor([0.6_469] ) , device=UpperCamelCase ) self.assertTrue(torch.allclose(outputs.loss , UpperCamelCase , atol=1e-4 ) )
69
0
"""simple docstring""" import re import jax.numpy as jnp from flax.traverse_util import flatten_dict, unflatten_dict from jax.random import PRNGKey from ..utils import logging _lowercase : int = logging.get_logger(__name__) def lowercase__ ( snake_case_ :Any ): __UpperCAmelCase = r'''\w+[.]\d+''' __UpperCAmelCase = re.findall(snake_case_ , snake_case_ ) for pat in pats: __UpperCAmelCase = key.replace(snake_case_ , '''_'''.join(pat.split('''.''' ) ) ) return key def lowercase__ ( snake_case_ :str , snake_case_ :Any , snake_case_ :List[str] ): __UpperCAmelCase = pt_tuple_key[:-1] + ('''scale''',) if ( any('''norm''' in str_ for str_ in pt_tuple_key ) and (pt_tuple_key[-1] == "bias") and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict) and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict) ): __UpperCAmelCase = pt_tuple_key[:-1] + ('''scale''',) return renamed_pt_tuple_key, pt_tensor elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict: __UpperCAmelCase = pt_tuple_key[:-1] + ('''scale''',) return renamed_pt_tuple_key, pt_tensor # embedding if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict: __UpperCAmelCase = pt_tuple_key[:-1] + ('''embedding''',) return renamed_pt_tuple_key, pt_tensor # conv layer __UpperCAmelCase = pt_tuple_key[:-1] + ('''kernel''',) if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4: __UpperCAmelCase = pt_tensor.transpose(2 , 3 , 1 , 0 ) return renamed_pt_tuple_key, pt_tensor # linear layer __UpperCAmelCase = pt_tuple_key[:-1] + ('''kernel''',) if pt_tuple_key[-1] == "weight": __UpperCAmelCase = pt_tensor.T return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm weight __UpperCAmelCase = pt_tuple_key[:-1] + ('''weight''',) if pt_tuple_key[-1] == "gamma": return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm bias __UpperCAmelCase = pt_tuple_key[:-1] + ('''bias''',) if pt_tuple_key[-1] == "beta": return 
renamed_pt_tuple_key, pt_tensor return pt_tuple_key, pt_tensor def lowercase__ ( snake_case_ :List[Any] , snake_case_ :Dict , snake_case_ :int=42 ): # Step 1: Convert pytorch tensor to numpy __UpperCAmelCase = {k: v.numpy() for k, v in pt_state_dict.items()} # Step 2: Since the model is stateless, get random Flax params __UpperCAmelCase = flax_model.init_weights(PRNGKey(snake_case_ ) ) __UpperCAmelCase = flatten_dict(snake_case_ ) __UpperCAmelCase = {} # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): __UpperCAmelCase = rename_key(snake_case_ ) __UpperCAmelCase = tuple(renamed_pt_key.split('''.''' ) ) # Correctly rename weight parameters __UpperCAmelCase , __UpperCAmelCase = rename_key_and_reshape_tensor(snake_case_ , snake_case_ , snake_case_ ) if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( F'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape ''' F'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' ) # also add unexpected weight so that warning is thrown __UpperCAmelCase = jnp.asarray(snake_case_ ) return unflatten_dict(snake_case_ )
332
"""simple docstring""" from typing import List, Optional, Union import numpy as np import tensorflow as tf from .utils import logging _lowercase : List[str] = logging.get_logger(__name__) def lowercase__ ( snake_case_ :Union[tf.Tensor, np.ndarray] ): if isinstance(snake_case_ , np.ndarray ): return list(tensor.shape ) __UpperCAmelCase = tf.shape(snake_case_ ) if tensor.shape == tf.TensorShape(snake_case_ ): return dynamic __UpperCAmelCase = tensor.shape.as_list() return [dynamic[i] if s is None else s for i, s in enumerate(snake_case_ )] def lowercase__ ( snake_case_ :tf.Tensor , snake_case_ :Optional[int] = None , snake_case_ :Optional[str] = None ): return tf.nn.softmax(logits=logits + 1E-9 , axis=snake_case_ , name=snake_case_ ) def lowercase__ ( snake_case_ :int , snake_case_ :Union[str, Any] , snake_case_ :str , snake_case_ :Union[str, Any]=1E-5 , snake_case_ :List[str]=-1 ): # This is a very simplified functional layernorm, designed to duplicate # the functionality of PyTorch nn.functional.layer_norm when this is needed to port # models in Transformers. if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(snake_case_ , snake_case_ ): raise NotImplementedError('''Only 1D weight and bias tensors are supported for now, with only a single axis.''' ) # Get mean and variance on the axis to be normalized __UpperCAmelCase , __UpperCAmelCase = tf.nn.moments(snake_case_ , axes=[axis] , keepdims=snake_case_ ) if axis != -1: # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions # on every dimension except axis __UpperCAmelCase = [1] * inputs.shape.rank __UpperCAmelCase = shape_list(snake_case_ )[axis] __UpperCAmelCase = tf.reshape(snake_case_ , snake_case_ ) __UpperCAmelCase = tf.reshape(snake_case_ , snake_case_ ) # Compute layer normalization using the batch_normalization # function. 
__UpperCAmelCase = tf.nn.batch_normalization( snake_case_ , snake_case_ , snake_case_ , offset=snake_case_ , scale=snake_case_ , variance_epsilon=snake_case_ , ) return outputs def lowercase__ ( snake_case_ :Optional[int] , snake_case_ :List[str]=0 , snake_case_ :Optional[Any]=-1 ): # Replicates the behavior of torch.flatten in TF # If end_dim or start_dim is negative, count them from the end if end_dim < 0: end_dim += input.shape.rank if start_dim < 0: start_dim += input.shape.rank if start_dim == end_dim: return input __UpperCAmelCase = tf.shape(snake_case_ ) __UpperCAmelCase = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] ) __UpperCAmelCase = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]] , axis=0 ) return tf.reshape(snake_case_ , snake_case_ ) def lowercase__ ( snake_case_ :tf.Tensor ): if not isinstance(snake_case_ , tf.Tensor ): __UpperCAmelCase = tf.convert_to_tensor(snake_case_ ) # Catches stray NumPy inputs if encoder_attention_mask.shape.rank == 3: __UpperCAmelCase = encoder_attention_mask[:, None, :, :] if encoder_attention_mask.shape.rank == 2: __UpperCAmelCase = encoder_attention_mask[:, None, None, :] # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition # Cf. 
https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow # /transformer/transformer_layers.py#L270 # encoder_extended_attention_mask = (encoder_extended_attention_mask == # encoder_extended_attention_mask.transpose(-1, -2)) __UpperCAmelCase = ( tf.cast(1 , encoder_attention_mask.dtype ) - encoder_extended_attention_mask ) * encoder_extended_attention_mask.dtype.min return encoder_extended_attention_mask def lowercase__ ( snake_case_ :tf.Tensor , snake_case_ :int , snake_case_ :str = "input_ids" ): tf.debugging.assert_less( snake_case_ , tf.cast(snake_case_ , dtype=tensor.dtype ) , message=( F'''The maximum value of {tensor_name} ({tf.math.reduce_max(snake_case_ )}) must be smaller than the embedding ''' F'''layer\'s input dimension ({embed_dim}). The likely cause is some problem at tokenization time.''' ) , ) def lowercase__ ( snake_case_ :List[Any] , snake_case_ :List[Any] , snake_case_ :List[str] ): __UpperCAmelCase = 64_512 # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT` # because in that case even chunking the array would not make the saving # possible. __UpperCAmelCase = [x for x in data if len(snake_case_ ) > HDF5_OBJECT_HEADER_LIMIT] # Expecting this to never be true. if bad_attributes: raise RuntimeError( '''The following attributes cannot be saved to HDF5 file because ''' F'''they are larger than {HDF5_OBJECT_HEADER_LIMIT} ''' F'''bytes: {bad_attributes}''' ) __UpperCAmelCase = np.asarray(snake_case_ ) __UpperCAmelCase = 1 __UpperCAmelCase = np.array_split(snake_case_ , snake_case_ ) # This will never loop forever thanks to the test above. 
while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ): num_chunks += 1 __UpperCAmelCase = np.array_split(snake_case_ , snake_case_ ) if num_chunks > 1: for chunk_id, chunk_data in enumerate(snake_case_ ): __UpperCAmelCase = chunk_data else: __UpperCAmelCase = data def lowercase__ ( snake_case_ :str , snake_case_ :List[str] ): if name in group.attrs: __UpperCAmelCase = [n.decode('''utf8''' ) if hasattr(snake_case_ , '''decode''' ) else n for n in group.attrs[name]] else: __UpperCAmelCase = [] __UpperCAmelCase = 0 while "%s%d" % (name, chunk_id) in group.attrs: data.extend( [n.decode('''utf8''' ) if hasattr(snake_case_ , '''decode''' ) else n for n in group.attrs['''%s%d''' % (name, chunk_id)]] ) chunk_id += 1 return data def lowercase__ ( snake_case_ :Tuple ): def _expand_single_ad_tensor(snake_case_ :Optional[int] ): if isinstance(snake_case_ , tf.Tensor ) and t.shape.rank == 1: return tf.expand_dims(snake_case_ , axis=-1 ) return t return tf.nest.map_structure(_expand_single_ad_tensor , snake_case_ )
332
1
"""Configuration class for BioGPT models.

Fixes applied (the obfuscated original was unrunnable):
- `__init__` declared every parameter with the same name `snake_case`
  (a SyntaxError); the canonical BioGPT parameter names were restored,
  matching the original signature's positional order and defaults.
- The base class `_a` was undefined; the `PretrainedConfig` already
  imported here is the evident intended base (`super().__init__` is called
  with `pad_token_id`/`bos_token_id`/`eos_token_id`, the PretrainedConfig
  contract).
- Every constructor value was assigned to a single local and discarded;
  they are now stored on `self`.
- The class attribute's `Optional[int]` annotation referenced an
  unimported name (evaluated at class creation -> NameError); dropped.
"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

# NOTE(review): originally the logger and this map were bound to the same
# module-level name (the map shadowed the logger); the map keeps the
# original public name, the logger gets its own.
__UpperCAmelCase = {
    "microsoft/biogpt": "https://huggingface.co/microsoft/biogpt/resolve/main/config.json",
    # See all BioGPT models at https://huggingface.co/models?filter=biogpt
}


class a(PretrainedConfig):
    """BioGPT model configuration (defaults match microsoft/biogpt)."""

    SCREAMING_SNAKE_CASE = "biogpt"  # model-type identifier

    def __init__(
        self,
        vocab_size=4_2384,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        scale_embedding=True,
        use_cache=True,
        layerdrop=0.0,
        activation_dropout=0.0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        """Build the config; unknown kwargs are forwarded to PretrainedConfig."""
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.scale_embedding = scale_embedding
        self.use_cache = use_cache
        self.layerdrop = layerdrop
        self.activation_dropout = activation_dropout
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
240
"""Tests for the PyTorch DeBERTa models (tester helper, common-suite test
class, and a slow integration test).

NOTE(review): obfuscated copy of the Hugging Face DeBERTa test module.
Obfuscation damage left byte-identical and flagged inline: duplicated
parameter names (a SyntaxError), undefined base classes (`_a`), all locals
collapsed onto `__UpperCAmelCase` so `self` attributes are never set, and
both test classes sharing the name `a`.
"""
import unittest

from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        DebertaForMaskedLM,
        DebertaForQuestionAnswering,
        DebertaForSequenceClassification,
        DebertaForTokenClassification,
        DebertaModel,
    )
    from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST


class a ( _a ):
    """Model-tester helper holding config hyper-parameters and builders.

    NOTE(review): base `_a` is unresolved in this file; the duplicated
    `snake_case` parameters below are a SyntaxError as written.
    """

    def __init__( self : Optional[Any] , snake_case : Dict , snake_case : Dict=13 , snake_case : str=7 , snake_case : Dict=True , snake_case : Any=True , snake_case : Optional[Any]=True , snake_case : Optional[Any]=True , snake_case : List[str]=99 , snake_case : str=32 , snake_case : Any=5 , snake_case : List[str]=4 , snake_case : List[str]=37 , snake_case : int="gelu" , snake_case : int=0.1 , snake_case : int=0.1 , snake_case : Union[str, Any]=512 , snake_case : int=16 , snake_case : Optional[Any]=2 , snake_case : List[Any]=0.02 , snake_case : Any=False , snake_case : int=True , snake_case : Union[str, Any]="None" , snake_case : str=3 , snake_case : Union[str, Any]=4 , snake_case : Any=None , ) -> List[Any]:
        # NOTE(review): each assignment below targets the same local name and
        # never touches `self`, so nothing is actually stored — obfuscation
        # damage; the trailing identifiers show the intended attribute names.
        __UpperCAmelCase : List[str] = parent
        __UpperCAmelCase : Dict = batch_size
        __UpperCAmelCase : Any = seq_length
        __UpperCAmelCase : List[Any] = is_training
        __UpperCAmelCase : List[str] = use_input_mask
        __UpperCAmelCase : Union[str, Any] = use_token_type_ids
        __UpperCAmelCase : Any = use_labels
        __UpperCAmelCase : Any = vocab_size
        __UpperCAmelCase : Dict = hidden_size
        __UpperCAmelCase : List[str] = num_hidden_layers
        __UpperCAmelCase : Union[str, Any] = num_attention_heads
        __UpperCAmelCase : List[str] = intermediate_size
        __UpperCAmelCase : str = hidden_act
        __UpperCAmelCase : Union[str, Any] = hidden_dropout_prob
        __UpperCAmelCase : int = attention_probs_dropout_prob
        __UpperCAmelCase : Tuple = max_position_embeddings
        __UpperCAmelCase : Any = type_vocab_size
        __UpperCAmelCase : Tuple = type_sequence_label_size
        __UpperCAmelCase : Union[str, Any] = initializer_range
        __UpperCAmelCase : Dict = num_labels
        __UpperCAmelCase : Any = num_choices
        __UpperCAmelCase : Any = relative_attention
        __UpperCAmelCase : Dict = position_biased_input
        __UpperCAmelCase : Optional[int] = pos_att_type
        __UpperCAmelCase : Dict = scope

    # prepare_config_and_inputs: random ids/masks/labels plus a config.
    def lowerCamelCase__ ( self : Dict ) -> Optional[int]:
        __UpperCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )

        __UpperCAmelCase : Optional[Any] = None
        if self.use_input_mask:
            __UpperCAmelCase : str = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )

        __UpperCAmelCase : Any = None
        if self.use_token_type_ids:
            __UpperCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )

        __UpperCAmelCase : List[str] = None
        __UpperCAmelCase : int = None
        __UpperCAmelCase : str = None
        if self.use_labels:
            __UpperCAmelCase : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            __UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            __UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices )

        __UpperCAmelCase : str = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    # get_config: DebertaConfig built from the tester's hyper-parameters.
    def lowerCamelCase__ ( self : Tuple ) -> List[str]:
        return DebertaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )

    # get_pipeline_config: same config with a reduced vocab (value is
    # assigned to a clobbered local; upstream this set config.vocab_size).
    def lowerCamelCase__ ( self : Union[str, Any] ) -> str:
        __UpperCAmelCase : Optional[int] = self.get_config()
        __UpperCAmelCase : Dict = 300
        return config

    # check_loss_output: loss must be a scalar.
    def lowerCamelCase__ ( self : Any , snake_case : int ) -> Any:
        self.parent.assertListEqual(list(result.loss.size() ) , [] )

    # create_and_check_deberta_model
    def lowerCamelCase__ ( self : List[Any] , snake_case : Optional[int] , snake_case : Any , snake_case : List[Any] , snake_case : Any , snake_case : Union[str, Any] , snake_case : Optional[Any] , snake_case : Union[str, Any] ) -> Optional[Any]:
        __UpperCAmelCase : List[str] = DebertaModel(config=snake_case )
        model.to(snake_case )
        model.eval()
        __UpperCAmelCase : str = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case )[0]
        __UpperCAmelCase : Tuple = model(snake_case , token_type_ids=snake_case )[0]
        __UpperCAmelCase : Optional[int] = model(snake_case )[0]

        self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )

    # create_and_check_deberta_for_masked_lm
    def lowerCamelCase__ ( self : Optional[int] , snake_case : int , snake_case : Tuple , snake_case : Any , snake_case : Tuple , snake_case : Union[str, Any] , snake_case : Optional[int] , snake_case : List[str] ) -> Optional[int]:
        __UpperCAmelCase : Union[str, Any] = DebertaForMaskedLM(config=snake_case )
        model.to(snake_case )
        model.eval()
        __UpperCAmelCase : int = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    # create_and_check_deberta_for_sequence_classification
    def lowerCamelCase__ ( self : str , snake_case : Union[str, Any] , snake_case : Tuple , snake_case : Optional[Any] , snake_case : Tuple , snake_case : List[Any] , snake_case : str , snake_case : Tuple ) -> Union[str, Any]:
        __UpperCAmelCase : Tuple = self.num_labels
        __UpperCAmelCase : List[Any] = DebertaForSequenceClassification(snake_case )
        model.to(snake_case )
        model.eval()
        __UpperCAmelCase : Any = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case )
        self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
        self.check_loss_output(snake_case )

    # create_and_check_deberta_for_token_classification
    def lowerCamelCase__ ( self : str , snake_case : List[str] , snake_case : Union[str, Any] , snake_case : List[str] , snake_case : Dict , snake_case : Optional[Any] , snake_case : Dict , snake_case : Optional[int] ) -> int:
        __UpperCAmelCase : List[Any] = self.num_labels
        __UpperCAmelCase : Union[str, Any] = DebertaForTokenClassification(config=snake_case )
        model.to(snake_case )
        model.eval()
        __UpperCAmelCase : Any = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    # create_and_check_deberta_for_question_answering
    def lowerCamelCase__ ( self : str , snake_case : List[Any] , snake_case : Optional[Any] , snake_case : str , snake_case : Optional[int] , snake_case : int , snake_case : int , snake_case : str ) -> Union[str, Any]:
        __UpperCAmelCase : Union[str, Any] = DebertaForQuestionAnswering(config=snake_case )
        model.to(snake_case )
        model.eval()
        __UpperCAmelCase : Optional[int] = model(
            snake_case , attention_mask=snake_case , token_type_ids=snake_case , start_positions=snake_case , end_positions=snake_case , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    # prepare_config_and_inputs_for_common
    def lowerCamelCase__ ( self : str ) -> int:
        __UpperCAmelCase : int = self.prepare_config_and_inputs()
        (
            (
                __UpperCAmelCase
            ) , (
                __UpperCAmelCase
            ) , (
                __UpperCAmelCase
            ) , (
                __UpperCAmelCase
            ) , (
                __UpperCAmelCase
            ) , (
                __UpperCAmelCase
            ) , (
                __UpperCAmelCase
            ) ,
        ) : Any = config_and_inputs
        __UpperCAmelCase : str = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
        return config, inputs_dict


@require_torch
class a ( _a , _a , unittest.TestCase ):
    """Common-suite model tests for DeBERTa.

    NOTE(review): both mixin bases are the unresolved name `_a`, and this
    class shadows the tester class `a` above.
    """

    # All model classes / pipeline mapping exercised by the common suite.
    SCREAMING_SNAKE_CASE : Optional[int] = (
        (
            DebertaModel,
            DebertaForMaskedLM,
            DebertaForSequenceClassification,
            DebertaForTokenClassification,
            DebertaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    SCREAMING_SNAKE_CASE : Dict = (
        {
            "feature-extraction": DebertaModel,
            "fill-mask": DebertaForMaskedLM,
            "question-answering": DebertaForQuestionAnswering,
            "text-classification": DebertaForSequenceClassification,
            "token-classification": DebertaForTokenClassification,
            "zero-shot": DebertaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    SCREAMING_SNAKE_CASE : Union[str, Any] = True
    SCREAMING_SNAKE_CASE : Dict = False
    SCREAMING_SNAKE_CASE : List[Any] = False
    SCREAMING_SNAKE_CASE : List[str] = False
    SCREAMING_SNAKE_CASE : Any = False

    # setUp — NOTE(review): `DebertaModelTester` and `snake_case` are
    # unresolved here; upstream this built the tester + ConfigTester.
    def lowerCamelCase__ ( self : Union[str, Any] ) -> Union[str, Any]:
        __UpperCAmelCase : Dict = DebertaModelTester(self )
        __UpperCAmelCase : List[Any] = ConfigTester(self , config_class=snake_case , hidden_size=37 )

    def lowerCamelCase__ ( self : Optional[int] ) -> Dict:
        self.config_tester.run_common_tests()

    def lowerCamelCase__ ( self : List[str] ) -> Optional[Any]:
        __UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*snake_case )

    def lowerCamelCase__ ( self : Dict ) -> List[str]:
        __UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*snake_case )

    def lowerCamelCase__ ( self : Dict ) -> str:
        __UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*snake_case )

    def lowerCamelCase__ ( self : Union[str, Any] ) -> Any:
        __UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*snake_case )

    def lowerCamelCase__ ( self : Dict ) -> Optional[Any]:
        __UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*snake_case )

    @slow
    def lowerCamelCase__ ( self : Dict ) -> Tuple:
        for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __UpperCAmelCase : str = DebertaModel.from_pretrained(snake_case )
            self.assertIsNotNone(snake_case )


@require_torch
@require_sentencepiece
@require_tokenizers
class a ( unittest.TestCase ):
    """Slow integration tests against the hosted microsoft/deberta-base weights."""

    @unittest.skip(reason='''Model not available yet''' )
    def lowerCamelCase__ ( self : Dict ) -> Tuple:
        pass

    @slow
    def lowerCamelCase__ ( self : int ) -> Optional[Any]:
        __UpperCAmelCase : Any = DebertaModel.from_pretrained('''microsoft/deberta-base''' )

        __UpperCAmelCase : Any = torch.tensor([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] )
        __UpperCAmelCase : Dict = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            __UpperCAmelCase : int = model(snake_case , attention_mask=snake_case )[0]
        # compare the actual values for a slice.
        __UpperCAmelCase : Optional[Any] = torch.tensor(
            [[[-0.5_986, -0.8_055, -0.8_462], [1.4_484, -0.9_348, -0.8_059], [0.3_123, 0.0_032, -1.4_131]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , snake_case , atol=1E-4 ) , f'{output[:, 1:4, 1:4]}' )
240
1
# Longformer configuration + ONNX export configuration.
#
# NOTE(review): obfuscated copy of the Hugging Face Longformer config
# module.  Both classes share the name `lowerCAmelCase` (the second shadows
# the first), the base class `__UpperCamelCase` is unresolved, the
# duplicated `UpperCAmelCase` parameters are a SyntaxError, and `__init__`
# bodies assign to one clobbered local instead of `self`.  Code left
# byte-identical; only comments added.
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging


if TYPE_CHECKING:
    from ...onnx.config import PatchingSpec
    from ...tokenization_utils_base import PreTrainedTokenizerBase

_UpperCAmelCase : Optional[Any] = logging.get_logger(__name__)

# NOTE(review): this assignment shadows the logger bound just above.
_UpperCAmelCase : Any = {
    """allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json""",
    """allenai/longformer-large-4096""": """https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json""",
    """allenai/longformer-large-4096-finetuned-triviaqa""": (
        """https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"""
    ),
    """allenai/longformer-base-4096-extra.pos.embd.only""": (
        """https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"""
    ),
    """allenai/longformer-large-4096-extra.pos.embd.only""": (
        """https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"""
    ),
}


class lowerCAmelCase ( __UpperCamelCase ):
    """Longformer model configuration (model type "longformer")."""

    UpperCAmelCase__ = """longformer"""

    # NOTE(review): duplicated `UpperCAmelCase` parameter names below are a
    # SyntaxError as written.
    def __init__( self : Any , UpperCAmelCase : Union[List[int], int] = 512 , UpperCAmelCase : int = 2 , UpperCAmelCase : int = 1 , UpperCAmelCase : int = 0 , UpperCAmelCase : int = 2 , UpperCAmelCase : int = 30522 , UpperCAmelCase : int = 768 , UpperCAmelCase : int = 12 , UpperCAmelCase : int = 12 , UpperCAmelCase : int = 3072 , UpperCAmelCase : str = "gelu" , UpperCAmelCase : float = 0.1 , UpperCAmelCase : float = 0.1 , UpperCAmelCase : int = 512 , UpperCAmelCase : int = 2 , UpperCAmelCase : float = 0.0_2 , UpperCAmelCase : float = 1e-12 , UpperCAmelCase : bool = False , **UpperCAmelCase : int , ) -> Union[str, Any]:
        super().__init__(pad_token_id=UpperCAmelCase , **UpperCAmelCase )

        # NOTE(review): each assignment binds one clobbered local, never
        # `self.<attr>`; the trailing names show the intended attributes.
        lowerCamelCase__ : str = attention_window
        lowerCamelCase__ : Optional[int] = sep_token_id
        lowerCamelCase__ : Optional[Any] = bos_token_id
        lowerCamelCase__ : int = eos_token_id
        lowerCamelCase__ : Any = vocab_size
        lowerCamelCase__ : Union[str, Any] = hidden_size
        lowerCamelCase__ : str = num_hidden_layers
        lowerCamelCase__ : int = num_attention_heads
        lowerCamelCase__ : List[str] = hidden_act
        lowerCamelCase__ : Any = intermediate_size
        lowerCamelCase__ : Optional[int] = hidden_dropout_prob
        lowerCamelCase__ : List[str] = attention_probs_dropout_prob
        lowerCamelCase__ : Tuple = max_position_embeddings
        lowerCamelCase__ : str = type_vocab_size
        lowerCamelCase__ : List[Any] = initializer_range
        lowerCamelCase__ : Union[str, Any] = layer_norm_eps
        lowerCamelCase__ : Optional[Any] = onnx_export


class lowerCAmelCase ( __UpperCamelCase ):
    """ONNX export configuration for Longformer.

    NOTE(review): re-uses the name `lowerCAmelCase`, shadowing the config
    class above; all members are named `A_`, so each def/property shadows
    the previous one.
    """

    def __init__( self : Optional[Any] , UpperCAmelCase : "PretrainedConfig" , UpperCAmelCase : str = "default" , UpperCAmelCase : "List[PatchingSpec]" = None ) -> Any:
        super().__init__(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
        lowerCamelCase__ : Any = True

    # inputs: dynamic-axis mapping; the choice axis only appears for
    # multiple-choice tasks.
    @property
    def A_ ( self : List[Any] ) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            lowerCamelCase__ : int = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            lowerCamelCase__ : Optional[int] = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
                ('global_attention_mask', dynamic_axis),
            ] )

    # outputs: parent mapping, batch-only axis for the default task.
    @property
    def A_ ( self : Dict ) -> Mapping[str, Mapping[int, str]]:
        lowerCamelCase__ : Any = super().outputs
        if self.task == "default":
            lowerCamelCase__ : List[Any] = {0: 'batch'}
        return outputs

    # atol_for_validation
    @property
    def A_ ( self : Optional[int] ) -> float:
        return 1e-4

    # default_onnx_opset
    @property
    def A_ ( self : str ) -> int:
        # needs to be >= 14 to support tril operator
        return max(super().default_onnx_opset , 14 )

    # generate_dummy_inputs: parent dummy inputs plus a zeroed
    # global_attention_mask.
    def A_ ( self : List[str] , UpperCAmelCase : "PreTrainedTokenizerBase" , UpperCAmelCase : int = -1 , UpperCAmelCase : int = -1 , UpperCAmelCase : bool = False , UpperCAmelCase : Optional[TensorType] = None , ) -> Mapping[str, Any]:
        lowerCamelCase__ : List[str] = super().generate_dummy_inputs(
            preprocessor=UpperCAmelCase , batch_size=UpperCAmelCase , seq_length=UpperCAmelCase , is_pair=UpperCAmelCase , framework=UpperCAmelCase )
        import torch

        # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
        lowerCamelCase__ : Dict = torch.zeros_like(inputs['input_ids'] )
        # make every second token global
        lowerCamelCase__ : Dict = 1
        return inputs
50
'''simple docstring''' import math class a : def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ ) -> int: _a = 0.0 _a = 0.0 for i in range(len(__magic_name__ ) ): da += math.pow((sample[i] - weights[0][i]) , 2 ) da += math.pow((sample[i] - weights[1][i]) , 2 ) return 0 if da > da else 1 return 0 def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> list[list[int | float]]: for i in range(len(__magic_name__ ) ): weights[j][i] += alpha * (sample[i] - weights[j][i]) return weights def _A () -> None: '''simple docstring''' _a = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]] # weight initialization ( n, C ) _a = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]] # training _a = SelfOrganizingMap() _a = 3 _a = 0.5 for _ in range(lowerCAmelCase__ ): for j in range(len(lowerCAmelCase__ ) ): # training sample _a = training_samples[j] # Compute the winning vector _a = self_organizing_map.get_winner(lowerCAmelCase__ , lowerCAmelCase__ ) # Update the winning vector _a = self_organizing_map.update(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) # classify test sample _a = [0, 0, 0, 1] _a = self_organizing_map.get_winner(lowerCAmelCase__ , lowerCAmelCase__ ) # results print(f'Clusters that the test sample belongs to : {winner}' ) print(f'Weights that have been trained : {weights}' ) # running the main() function if __name__ == "__main__": main()
168
0
import shutil import tempfile import unittest import numpy as np import pytest from transformers.testing_utils import require_vision from transformers.utils import is_vision_available if is_vision_available(): from PIL import Image from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast @require_vision class a_ ( unittest.TestCase ): '''simple docstring''' def _lowercase ( self ) -> Dict: '''simple docstring''' lowerCAmelCase_ = tempfile.mkdtemp() lowerCAmelCase_ = BlipImageProcessor() lowerCAmelCase_ = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-BertModel' ) lowerCAmelCase_ = BlipProcessor(__lowerCAmelCase , __lowerCAmelCase ) processor.save_pretrained(self.tmpdirname ) def _lowercase ( self , **lowercase_ ) -> str: '''simple docstring''' return AutoProcessor.from_pretrained(self.tmpdirname , **__lowerCAmelCase ).tokenizer def _lowercase ( self , **lowercase_ ) -> Tuple: '''simple docstring''' return AutoProcessor.from_pretrained(self.tmpdirname , **__lowerCAmelCase ).image_processor def _lowercase ( self ) -> Any: '''simple docstring''' shutil.rmtree(self.tmpdirname ) def _lowercase ( self ) -> Any: '''simple docstring''' lowerCAmelCase_ = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )] lowerCAmelCase_ = [Image.fromarray(np.moveaxis(__lowerCAmelCase , 0 , -1 ) ) for x in image_inputs] return image_inputs def _lowercase ( self ) -> int: '''simple docstring''' lowerCAmelCase_ = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) lowerCAmelCase_ = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' ) lowerCAmelCase_ = self.get_image_processor(do_normalize=__lowerCAmelCase , padding_value=1.0 ) lowerCAmelCase_ = BlipProcessor.from_pretrained( self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=__lowerCAmelCase , padding_value=1.0 ) 
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , __lowerCAmelCase ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , __lowerCAmelCase ) def _lowercase ( self ) -> Dict: '''simple docstring''' lowerCAmelCase_ = self.get_image_processor() lowerCAmelCase_ = self.get_tokenizer() lowerCAmelCase_ = BlipProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) lowerCAmelCase_ = self.prepare_image_inputs() lowerCAmelCase_ = image_processor(__lowerCAmelCase , return_tensors='np' ) lowerCAmelCase_ = processor(images=__lowerCAmelCase , return_tensors='np' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def _lowercase ( self ) -> Any: '''simple docstring''' lowerCAmelCase_ = self.get_image_processor() lowerCAmelCase_ = self.get_tokenizer() lowerCAmelCase_ = BlipProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) lowerCAmelCase_ = 'lower newer' lowerCAmelCase_ = processor(text=__lowerCAmelCase ) lowerCAmelCase_ = tokenizer(__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def _lowercase ( self ) -> List[Any]: '''simple docstring''' lowerCAmelCase_ = self.get_image_processor() lowerCAmelCase_ = self.get_tokenizer() lowerCAmelCase_ = BlipProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) lowerCAmelCase_ = 'lower newer' lowerCAmelCase_ = self.prepare_image_inputs() lowerCAmelCase_ = processor(text=__lowerCAmelCase , images=__lowerCAmelCase ) self.assertListEqual(list(inputs.keys() ) , ['pixel_values', 'input_ids', 'attention_mask'] ) # test if it raises when no input is passed with pytest.raises(__lowerCAmelCase ): processor() def _lowercase ( 
self ) -> Union[str, Any]: '''simple docstring''' lowerCAmelCase_ = self.get_image_processor() lowerCAmelCase_ = self.get_tokenizer() lowerCAmelCase_ = BlipProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) lowerCAmelCase_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] lowerCAmelCase_ = processor.batch_decode(__lowerCAmelCase ) lowerCAmelCase_ = tokenizer.batch_decode(__lowerCAmelCase ) self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase ) def _lowercase ( self ) -> int: '''simple docstring''' lowerCAmelCase_ = self.get_image_processor() lowerCAmelCase_ = self.get_tokenizer() lowerCAmelCase_ = BlipProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) lowerCAmelCase_ = 'lower newer' lowerCAmelCase_ = self.prepare_image_inputs() lowerCAmelCase_ = processor(text=__lowerCAmelCase , images=__lowerCAmelCase ) # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask'] self.assertListEqual(list(inputs.keys() ) , ['pixel_values', 'input_ids', 'attention_mask'] )
370
import argparse import gdown import numpy as np import torch from huggingface_hub import hf_hub_download from transformers import ( CLIPTokenizer, CLIPTokenizerFast, VideoMAEImageProcessor, XCLIPConfig, XCLIPModel, XCLIPProcessor, XCLIPTextConfig, XCLIPVisionConfig, ) def lowerCamelCase ( a_ , a_ ) -> Tuple: lowerCAmelCase_ = XCLIPTextConfig() # derive patch size from model name lowerCAmelCase_ = model_name.find('patch' ) lowerCAmelCase_ = int(model_name[start_idx + len('patch' ) : start_idx + len('patch' ) + 2] ) lowerCAmelCase_ = XCLIPVisionConfig(patch_size=a_ , num_frames=a_ ) if "large" in model_name: lowerCAmelCase_ = 768 lowerCAmelCase_ = 3_072 lowerCAmelCase_ = 12 lowerCAmelCase_ = 1_024 lowerCAmelCase_ = 4_096 lowerCAmelCase_ = 16 lowerCAmelCase_ = 24 lowerCAmelCase_ = 768 lowerCAmelCase_ = 3_072 if model_name == "xclip-large-patch14-16-frames": lowerCAmelCase_ = 336 lowerCAmelCase_ = XCLIPConfig.from_text_vision_configs(a_ , a_ ) if "large" in model_name: lowerCAmelCase_ = 768 return config def lowerCamelCase ( a_ ) -> List[str]: # text encoder if name == "token_embedding.weight": lowerCAmelCase_ = name.replace('token_embedding.weight' , 'text_model.embeddings.token_embedding.weight' ) if name == "positional_embedding": lowerCAmelCase_ = name.replace('positional_embedding' , 'text_model.embeddings.position_embedding.weight' ) if "ln_1" in name: lowerCAmelCase_ = name.replace('ln_1' , 'layer_norm1' ) if "ln_2" in name: lowerCAmelCase_ = name.replace('ln_2' , 'layer_norm2' ) if "c_fc" in name: lowerCAmelCase_ = name.replace('c_fc' , 'fc1' ) if "c_proj" in name: lowerCAmelCase_ = name.replace('c_proj' , 'fc2' ) if name.startswith('transformer.resblocks' ): lowerCAmelCase_ = name.replace('transformer.resblocks' , 'text_model.encoder.layers' ) if "attn.out_proj" in name and "message" not in name: lowerCAmelCase_ = name.replace('attn.out_proj' , 'self_attn.out_proj' ) if "ln_final" in name: lowerCAmelCase_ = name.replace('ln_final' , 
'text_model.final_layer_norm' ) # visual encoder if name == "visual.class_embedding": lowerCAmelCase_ = name.replace('visual.class_embedding' , 'vision_model.embeddings.class_embedding' ) if name == "visual.positional_embedding": lowerCAmelCase_ = name.replace('visual.positional_embedding' , 'vision_model.embeddings.position_embedding.weight' ) if name.startswith('visual.transformer.resblocks' ): lowerCAmelCase_ = name.replace('visual.transformer.resblocks' , 'vision_model.encoder.layers' ) if "visual.conv1" in name: lowerCAmelCase_ = name.replace('visual.conv1' , 'vision_model.embeddings.patch_embedding' ) if "visual.ln_pre" in name: lowerCAmelCase_ = name.replace('visual.ln_pre' , 'vision_model.pre_layernorm' ) if "visual.ln_post" in name: lowerCAmelCase_ = name.replace('visual.ln_post' , 'vision_model.post_layernorm' ) if "visual.proj" in name: lowerCAmelCase_ = name.replace('visual.proj' , 'visual_projection.weight' ) if "text_projection" in name: lowerCAmelCase_ = name.replace('text_projection' , 'text_projection.weight' ) # things on top if "prompts_visual_proj" in name: lowerCAmelCase_ = name.replace('prompts_visual_proj' , 'prompts_visual_projection' ) if "prompts_visual_ln" in name: lowerCAmelCase_ = name.replace('prompts_visual_ln' , 'prompts_visual_layernorm' ) # mit if name == "mit.positional_embedding": lowerCAmelCase_ = name.replace('positional' , 'position' ) if name.startswith('mit.resblocks' ): lowerCAmelCase_ = name.replace('mit.resblocks' , 'mit.encoder.layers' ) # prompts generator if name.startswith('prompts_generator.norm' ): lowerCAmelCase_ = name.replace('prompts_generator.norm' , 'prompts_generator.layernorm' ) return name def lowerCamelCase ( a_ , a_ ) -> Dict: for key in orig_state_dict.copy().keys(): lowerCAmelCase_ = orig_state_dict.pop(a_ ) if "attn.in_proj" in key: lowerCAmelCase_ = key.split('.' 
) if key.startswith('visual' ): lowerCAmelCase_ = key_split[3] lowerCAmelCase_ = config.vision_config.hidden_size if "message_attn" in key: if "weight" in key: lowerCAmelCase_ = val[ :dim, : ] lowerCAmelCase_ = val[ dim : dim * 2, : ] lowerCAmelCase_ = val[ -dim:, : ] else: lowerCAmelCase_ = val[ :dim ] lowerCAmelCase_ = val[ dim : dim * 2 ] lowerCAmelCase_ = val[ -dim: ] else: if "weight" in key: lowerCAmelCase_ = val[ :dim, : ] lowerCAmelCase_ = val[ dim : dim * 2, : ] lowerCAmelCase_ = val[ -dim:, : ] else: lowerCAmelCase_ = val[:dim] lowerCAmelCase_ = val[ dim : dim * 2 ] lowerCAmelCase_ = val[-dim:] elif key.startswith('mit' ): lowerCAmelCase_ = key_split[2] lowerCAmelCase_ = config.vision_config.mit_hidden_size if "weight" in key: lowerCAmelCase_ = val[:dim, :] lowerCAmelCase_ = val[dim : dim * 2, :] lowerCAmelCase_ = val[-dim:, :] else: lowerCAmelCase_ = val[:dim] lowerCAmelCase_ = val[dim : dim * 2] lowerCAmelCase_ = val[-dim:] else: lowerCAmelCase_ = key_split[2] lowerCAmelCase_ = config.text_config.hidden_size if "weight" in key: lowerCAmelCase_ = val[:dim, :] lowerCAmelCase_ = val[ dim : dim * 2, : ] lowerCAmelCase_ = val[-dim:, :] else: lowerCAmelCase_ = val[:dim] lowerCAmelCase_ = val[ dim : dim * 2 ] lowerCAmelCase_ = val[-dim:] else: lowerCAmelCase_ = rename_key(a_ ) if new_key_name in ["visual_projection.weight", "text_projection.weight"]: lowerCAmelCase_ = val.T lowerCAmelCase_ = val return orig_state_dict def lowerCamelCase ( a_ ) -> List[str]: if num_frames == 8: lowerCAmelCase_ = 'eating_spaghetti_8_frames.npy' elif num_frames == 16: lowerCAmelCase_ = 'eating_spaghetti.npy' elif num_frames == 32: lowerCAmelCase_ = 'eating_spaghetti_32_frames.npy' lowerCAmelCase_ = hf_hub_download( repo_id='hf-internal-testing/spaghetti-video' , filename=a_ , repo_type='dataset' , ) lowerCAmelCase_ = np.load(a_ ) return list(a_ ) def lowerCamelCase ( a_ , a_=None , a_=False ) -> List[Any]: lowerCAmelCase_ = { # fully supervised kinetics-400 checkpoints 
'xclip-base-patch32': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth', 'xclip-base-patch32-16-frames': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth' ), 'xclip-base-patch16': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth', 'xclip-base-patch16-16-frames': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth' ), 'xclip-large-patch14': 'https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&amp;export=download&amp;confirm=t&amp;uuid=b26caedc-88e2-473e-830a-9d158b653cdb', 'xclip-large-patch14-16-frames': 'https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&amp;export=download&amp;confirm=t&amp;uuid=538fa810-e671-4050-b385-9a623f89804f', # fully supervised kinetics-600 checkpoints 'xclip-base-patch16-kinetics-600': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth' ), 'xclip-base-patch16-kinetics-600-16-frames': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth' ), 'xclip-large-patch14-kinetics-600': 'https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&amp;export=download&amp;confirm=t&amp;uuid=141d4977-4a65-44ae-864f-4b0c19f838be', # few shot 'xclip-base-patch16-hmdb-2-shot': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth' ), 'xclip-base-patch16-hmdb-4-shot': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth' ), 'xclip-base-patch16-hmdb-8-shot': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth' ), 'xclip-base-patch16-hmdb-16-shot': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth' ), 'xclip-base-patch16-ucf-2-shot': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth' ), 'xclip-base-patch16-ucf-4-shot': ( 
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth' ), 'xclip-base-patch16-ucf-8-shot': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth' ), 'xclip-base-patch16-ucf-16-shot': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth' ), # zero shot 'xclip-base-patch16-zero-shot': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth', } lowerCAmelCase_ = model_to_url[model_name] lowerCAmelCase_ = 8 if "16-frames" in model_name: lowerCAmelCase_ = 16 elif "shot" in model_name: lowerCAmelCase_ = 32 lowerCAmelCase_ = get_xclip_config(a_ , a_ ) lowerCAmelCase_ = XCLIPModel(a_ ) model.eval() if "drive" in checkpoint_url: lowerCAmelCase_ = 'pytorch_model.bin' gdown.cached_download(a_ , a_ , quiet=a_ ) lowerCAmelCase_ = torch.load(a_ , map_location='cpu' )['model'] else: lowerCAmelCase_ = torch.hub.load_state_dict_from_url(a_ )['model'] lowerCAmelCase_ = convert_state_dict(a_ , a_ ) lowerCAmelCase_ = XCLIPModel(a_ ) lowerCAmelCase_ , lowerCAmelCase_ = model.load_state_dict(a_ , strict=a_ ) assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"] model.eval() lowerCAmelCase_ = 336 if model_name == 'xclip-large-patch14-16-frames' else 224 lowerCAmelCase_ = VideoMAEImageProcessor(size=a_ ) lowerCAmelCase_ = CLIPTokenizer.from_pretrained('openai/clip-vit-base-patch32' ) lowerCAmelCase_ = CLIPTokenizerFast.from_pretrained('openai/clip-vit-base-patch32' ) lowerCAmelCase_ = XCLIPProcessor(image_processor=a_ , tokenizer=a_ ) lowerCAmelCase_ = prepare_video(a_ ) lowerCAmelCase_ = processor( text=['playing sports', 'eating spaghetti', 'go shopping'] , videos=a_ , return_tensors='pt' , padding=a_ ) print('Shape of pixel values:' , inputs.pixel_values.shape ) with torch.no_grad(): lowerCAmelCase_ = model(**a_ ) # Verify outputs lowerCAmelCase_ = outputs.logits_per_video lowerCAmelCase_ = logits_per_video.softmax(dim=1 ) print('Probs:' 
, a_ ) # kinetics-400 if model_name == "xclip-base-patch32": lowerCAmelCase_ = torch.tensor([[0.0_019, 0.9_951, 0.0_030]] ) elif model_name == "xclip-base-patch32-16-frames": lowerCAmelCase_ = torch.tensor([[7.0999e-04, 9.9883e-01, 4.5580e-04]] ) elif model_name == "xclip-base-patch16": lowerCAmelCase_ = torch.tensor([[0.0_083, 0.9_681, 0.0_236]] ) elif model_name == "xclip-base-patch16-16-frames": lowerCAmelCase_ = torch.tensor([[7.6937e-04, 9.9728e-01, 1.9473e-03]] ) elif model_name == "xclip-large-patch14": lowerCAmelCase_ = torch.tensor([[0.0_062, 0.9_864, 0.0_075]] ) elif model_name == "xclip-large-patch14-16-frames": lowerCAmelCase_ = torch.tensor([[3.3877e-04, 9.9937e-01, 2.8888e-04]] ) # kinetics-600 elif model_name == "xclip-base-patch16-kinetics-600": lowerCAmelCase_ = torch.tensor([[0.0_555, 0.8_914, 0.0_531]] ) elif model_name == "xclip-base-patch16-kinetics-600-16-frames": lowerCAmelCase_ = torch.tensor([[3.8554e-04, 9.9929e-01, 3.2754e-04]] ) elif model_name == "xclip-large-patch14-kinetics-600": lowerCAmelCase_ = torch.tensor([[0.0_036, 0.9_920, 0.0_045]] ) # few shot elif model_name == "xclip-base-patch16-hmdb-2-shot": lowerCAmelCase_ = torch.tensor([[7.1890e-06, 9.9994e-01, 5.6559e-05]] ) elif model_name == "xclip-base-patch16-hmdb-4-shot": lowerCAmelCase_ = torch.tensor([[1.0320e-05, 9.9993e-01, 6.2435e-05]] ) elif model_name == "xclip-base-patch16-hmdb-8-shot": lowerCAmelCase_ = torch.tensor([[4.1377e-06, 9.9990e-01, 9.8386e-05]] ) elif model_name == "xclip-base-patch16-hmdb-16-shot": lowerCAmelCase_ = torch.tensor([[4.1347e-05, 9.9962e-01, 3.3411e-04]] ) elif model_name == "xclip-base-patch16-ucf-2-shot": lowerCAmelCase_ = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]] ) elif model_name == "xclip-base-patch16-ucf-4-shot": lowerCAmelCase_ = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]] ) elif model_name == "xclip-base-patch16-ucf-8-shot": lowerCAmelCase_ = torch.tensor([[0.0_027, 0.9_904, 0.0_070]] ) elif model_name == 
"xclip-base-patch16-ucf-16-shot": lowerCAmelCase_ = torch.tensor([[9.8219e-04, 9.9593e-01, 3.0863e-03]] ) # zero shot elif model_name == "xclip-base-patch16-zero-shot": lowerCAmelCase_ = torch.tensor([[3.5082e-04, 9.9785e-01, 1.7966e-03]] ) else: raise ValueError(F'''Model name {model_name} not supported''' ) assert torch.allclose(a_ , a_ , atol=1e-3 ) print('Looks ok!' ) if pytorch_dump_folder_path is not None: print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(a_ ) if push_to_hub: print('Pushing model, processor and slow tokenizer files to the hub...' ) model.push_to_hub(a_ , organization='nielsr' ) processor.push_to_hub(a_ , organization='nielsr' ) slow_tokenizer.push_to_hub(a_ , organization='nielsr' ) if __name__ == "__main__": lowerCamelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""xclip-base-patch32""", type=str, help="""Name of the model.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) lowerCamelCase_ = parser.parse_args() convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
14
0
"""simple docstring""" def lowercase ( A_ )-> int: '''simple docstring''' a : Any = 0 while num > 0: digit_sum += num % 10 num //= 10 return digit_sum def lowercase ( A_ = 100 )-> int: '''simple docstring''' a : Tuple = 1 a : Optional[int] = 2 for i in range(2 , max_n + 1 ): a : Optional[Any] = pre_numerator a : Optional[int] = 2 * i // 3 if i % 3 == 0 else 1 a : Dict = cur_numerator a : List[str] = e_cont * pre_numerator + temp return sum_digits(A_ ) if __name__ == "__main__": print(f'''{solution() = }''')
40
"""simple docstring""" import argparse import copy def lowercase__ ( snake_case_ :Tuple ): __UpperCAmelCase = {} with open(snake_case_ ) as f: for line in f: if line.split()[0] not in dict_of_neighbours: __UpperCAmelCase = [] _list.append([line.split()[1], line.split()[2]] ) __UpperCAmelCase = _list else: dict_of_neighbours[line.split()[0]].append( [line.split()[1], line.split()[2]] ) if line.split()[1] not in dict_of_neighbours: __UpperCAmelCase = [] _list.append([line.split()[0], line.split()[2]] ) __UpperCAmelCase = _list else: dict_of_neighbours[line.split()[1]].append( [line.split()[0], line.split()[2]] ) return dict_of_neighbours def lowercase__ ( snake_case_ :Dict , snake_case_ :Optional[Any] ): with open(snake_case_ ) as f: __UpperCAmelCase = f.read(1 ) __UpperCAmelCase = start_node __UpperCAmelCase = [] __UpperCAmelCase = start_node __UpperCAmelCase = 0 while visiting not in first_solution: __UpperCAmelCase = 10_000 for k in dict_of_neighbours[visiting]: if int(k[1] ) < int(snake_case_ ) and k[0] not in first_solution: __UpperCAmelCase = k[1] __UpperCAmelCase = k[0] first_solution.append(snake_case_ ) __UpperCAmelCase = distance_of_first_solution + int(snake_case_ ) __UpperCAmelCase = best_node first_solution.append(snake_case_ ) __UpperCAmelCase = 0 for k in dict_of_neighbours[first_solution[-2]]: if k[0] == start_node: break position += 1 __UpperCAmelCase = ( distance_of_first_solution + int(dict_of_neighbours[first_solution[-2]][position][1] ) - 10_000 ) return first_solution, distance_of_first_solution def lowercase__ ( snake_case_ :int , snake_case_ :Tuple ): __UpperCAmelCase = [] for n in solution[1:-1]: __UpperCAmelCase = solution.index(snake_case_ ) for kn in solution[1:-1]: __UpperCAmelCase = solution.index(snake_case_ ) if n == kn: continue __UpperCAmelCase = copy.deepcopy(snake_case_ ) __UpperCAmelCase = kn __UpperCAmelCase = n __UpperCAmelCase = 0 for k in _tmp[:-1]: __UpperCAmelCase = _tmp[_tmp.index(snake_case_ ) + 1] for i in 
dict_of_neighbours[k]: if i[0] == next_node: __UpperCAmelCase = distance + int(i[1] ) _tmp.append(snake_case_ ) if _tmp not in neighborhood_of_solution: neighborhood_of_solution.append(_tmp ) __UpperCAmelCase = len(neighborhood_of_solution[0] ) - 1 neighborhood_of_solution.sort(key=lambda snake_case_ : x[index_of_last_item_in_the_list] ) return neighborhood_of_solution def lowercase__ ( snake_case_ :str , snake_case_ :Union[str, Any] , snake_case_ :Optional[int] , snake_case_ :Dict , snake_case_ :int ): __UpperCAmelCase = 1 __UpperCAmelCase = first_solution __UpperCAmelCase = [] __UpperCAmelCase = distance_of_first_solution __UpperCAmelCase = solution while count <= iters: __UpperCAmelCase = find_neighborhood(snake_case_ , snake_case_ ) __UpperCAmelCase = 0 __UpperCAmelCase = neighborhood[index_of_best_solution] __UpperCAmelCase = len(snake_case_ ) - 1 __UpperCAmelCase = False while not found: __UpperCAmelCase = 0 while i < len(snake_case_ ): if best_solution[i] != solution[i]: __UpperCAmelCase = best_solution[i] __UpperCAmelCase = solution[i] break __UpperCAmelCase = i + 1 if [first_exchange_node, second_exchange_node] not in tabu_list and [ second_exchange_node, first_exchange_node, ] not in tabu_list: tabu_list.append([first_exchange_node, second_exchange_node] ) __UpperCAmelCase = True __UpperCAmelCase = best_solution[:-1] __UpperCAmelCase = neighborhood[index_of_best_solution][best_cost_index] if cost < best_cost: __UpperCAmelCase = cost __UpperCAmelCase = solution else: __UpperCAmelCase = index_of_best_solution + 1 __UpperCAmelCase = neighborhood[index_of_best_solution] if len(snake_case_ ) >= size: tabu_list.pop(0 ) __UpperCAmelCase = count + 1 return best_solution_ever, best_cost def lowercase__ ( snake_case_ :str=None ): __UpperCAmelCase = generate_neighbours(args.File ) __UpperCAmelCase , __UpperCAmelCase = generate_first_solution( args.File , snake_case_ ) __UpperCAmelCase , __UpperCAmelCase = tabu_search( snake_case_ , snake_case_ , snake_case_ , 
args.Iterations , args.Size , ) print(F'''Best solution: {best_sol}, with total distance: {best_cost}.''' ) if __name__ == "__main__": _lowercase : List[str] = argparse.ArgumentParser(description='Tabu Search') parser.add_argument( '-f', '--File', type=str, help='Path to the file containing the data', required=True, ) parser.add_argument( '-i', '--Iterations', type=int, help='How many iterations the algorithm should perform', required=True, ) parser.add_argument( '-s', '--Size', type=int, help='Size of the tabu list', required=True ) # Pass the arguments to main method main(parser.parse_args())
332
0
import json import multiprocessing as mp import re from collections import defaultdict from functools import partial from typing import Dict, List, Optional, Set, Tuple, Type from datasets import Dataset from datasketch import MinHash, MinHashLSH from dpu_utils.utils.iterators import ThreadedIterator from tqdm import tqdm __snake_case : str = re.compile("""[^A-Za-z_0-9]""") # parameters used in DuplicationIndex __snake_case : Optional[Any] = 10 __snake_case : Tuple = 2_56 def _UpperCamelCase ( UpperCamelCase_ : List[str] ) -> Optional[MinHash]: """simple docstring""" if len(UpperCamelCase_ ) < MIN_NUM_TOKENS: return None lowerCAmelCase__ = MinHash(num_perm=UpperCamelCase_ ) for token in set(UpperCamelCase_ ): min_hash.update(token.encode() ) return min_hash def _UpperCamelCase ( UpperCamelCase_ : str ) -> Set[str]: """simple docstring""" return {t for t in NON_ALPHA.split(UpperCamelCase_ ) if len(t.strip() ) > 0} class __SCREAMING_SNAKE_CASE : def __init__( self , *, _UpperCamelCase = 0.85 , ): """simple docstring""" lowerCAmelCase__ = duplication_jaccard_threshold lowerCAmelCase__ = NUM_PERM lowerCAmelCase__ = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm ) lowerCAmelCase__ = defaultdict(_UpperCamelCase ) def UpperCamelCase__ ( self , _UpperCamelCase , _UpperCamelCase ): """simple docstring""" lowerCAmelCase__ = self._index.query(_UpperCamelCase ) if code_key in self._index.keys: print(F"Duplicate key {code_key}" ) return self._index.insert(_UpperCamelCase , _UpperCamelCase ) if len(_UpperCamelCase ) > 0: for base_duplicate in close_duplicates: if base_duplicate in self._duplicate_clusters: self._duplicate_clusters[base_duplicate].add(_UpperCamelCase ) break else: self._duplicate_clusters[close_duplicates[0]].add(_UpperCamelCase ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase__ = [] for base, duplicates in self._duplicate_clusters.items(): lowerCAmelCase__ = [base] + list(_UpperCamelCase ) # reformat the 
cluster to be a list of dict lowerCAmelCase__ = [{'base_index': el[0], 'repo_name': el[1], 'path': el[2]} for el in cluster] duplicate_clusters.append(_UpperCamelCase ) return duplicate_clusters def UpperCamelCase__ ( self , _UpperCamelCase ): """simple docstring""" lowerCAmelCase__ = self.get_duplicate_clusters() with open(_UpperCamelCase , 'w' ) as f: json.dump(_UpperCamelCase , _UpperCamelCase ) def _UpperCamelCase ( UpperCamelCase_ : str ) -> Optional[Any]: """simple docstring""" lowerCAmelCase__ , lowerCAmelCase__ = element lowerCAmelCase__ = get_min_hash([t for t in NON_ALPHA.split(data['content'] ) if len(t.strip() ) > 0] ) if min_hash is not None: return (index, data["repo_name"], data["path"]), min_hash def _UpperCamelCase ( UpperCamelCase_ : Type[Dataset] ) -> Union[str, Any]: """simple docstring""" with mp.Pool() as pool: for data in pool.imap_unordered( _compute_min_hash , ThreadedIterator(UpperCamelCase_ , max_queue_size=1_0000 ) , chunksize=100 , ): if data is not None: yield data def _UpperCamelCase ( UpperCamelCase_ : Type[Dataset] , UpperCamelCase_ : float ) -> List[str]: """simple docstring""" lowerCAmelCase__ = DuplicationIndex(duplication_jaccard_threshold=UpperCamelCase_ ) for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(UpperCamelCase_ ) ) , max_queue_size=100 ) ): di.add(UpperCamelCase_ , UpperCamelCase_ ) # Returns a List[Cluster] where Cluster is List[str] with the filenames. 
return di.get_duplicate_clusters() def _UpperCamelCase ( UpperCamelCase_ : str , UpperCamelCase_ : str ) -> float: """simple docstring""" lowerCAmelCase__ = get_tokens(UpperCamelCase_ ) lowerCAmelCase__ = get_tokens(UpperCamelCase_ ) return len(tokensa & tokensa ) / len(tokensa | tokensa ) __snake_case : Optional[Any] = None def _UpperCamelCase ( UpperCamelCase_ : Any , UpperCamelCase_ : Optional[int] ) -> Dict: """simple docstring""" lowerCAmelCase__ = [] for elementa in cluster: lowerCAmelCase__ = _shared_dataset[elementa['base_index']]['content'] for elementa in extremes: lowerCAmelCase__ = _shared_dataset[elementa['base_index']]['content'] if jaccard_similarity(UpperCamelCase_ , UpperCamelCase_ ) >= jaccard_threshold: elementa["copies"] += 1 break else: lowerCAmelCase__ = 1 extremes.append(UpperCamelCase_ ) return extremes def _UpperCamelCase ( UpperCamelCase_ : int , UpperCamelCase_ : List[str] , UpperCamelCase_ : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" global _shared_dataset lowerCAmelCase__ = dataset lowerCAmelCase__ = [] lowerCAmelCase__ = partial(_find_cluster_extremes_shared , jaccard_threshold=UpperCamelCase_ ) with mp.Pool() as pool: for extremes in tqdm( pool.imap_unordered( UpperCamelCase_ , UpperCamelCase_ , ) , total=len(UpperCamelCase_ ) , ): extremes_list.append(UpperCamelCase_ ) return extremes_list def _UpperCamelCase ( UpperCamelCase_ : Type[Dataset] , UpperCamelCase_ : float = 0.85 ) -> Tuple[Type[Dataset], List[List[Dict]]]: """simple docstring""" lowerCAmelCase__ = make_duplicate_clusters(UpperCamelCase_ , UpperCamelCase_ ) lowerCAmelCase__ = {x['base_index'] for cluster in duplicate_clusters for x in cluster} lowerCAmelCase__ = {} lowerCAmelCase__ = find_extremes(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) for extremes in extremes_clusters: for element in extremes: lowerCAmelCase__ = element lowerCAmelCase__ = duplicate_indices - set(extreme_dict.keys() ) lowerCAmelCase__ = dataset.filter(lambda 
UpperCamelCase_ , UpperCamelCase_ : idx not in remove_indices , with_indices=UpperCamelCase_ ) # update duplicate_clusters for cluster in duplicate_clusters: for element in cluster: lowerCAmelCase__ = element['base_index'] in extreme_dict if element["is_extreme"]: lowerCAmelCase__ = extreme_dict[element['base_index']]['copies'] print(F"Original dataset size: {len(UpperCamelCase_ )}" ) print(F"Number of duplicate clusters: {len(UpperCamelCase_ )}" ) print(F"Files in duplicate cluster: {len(UpperCamelCase_ )}" ) print(F"Unique files in duplicate cluster: {len(UpperCamelCase_ )}" ) print(F"Filtered dataset size: {len(UpperCamelCase_ )}" ) return ds_filter, duplicate_clusters
122
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, logging __snake_case : Any = logging.get_logger(__name__) class __SCREAMING_SNAKE_CASE ( __lowercase): _SCREAMING_SNAKE_CASE : List[Any] = ['''pixel_values'''] def __init__( self , _UpperCamelCase = True , _UpperCamelCase = None , _UpperCamelCase = PILImageResampling.BILINEAR , _UpperCamelCase = True , _UpperCamelCase = None , _UpperCamelCase = True , _UpperCamelCase = 1 / 2_55 , _UpperCamelCase = True , _UpperCamelCase = None , _UpperCamelCase = None , **_UpperCamelCase , ): """simple docstring""" super().__init__(**_UpperCamelCase ) lowerCAmelCase__ = size if size is not None else {'shortest_edge': 2_56} lowerCAmelCase__ = get_size_dict(_UpperCamelCase , default_to_square=_UpperCamelCase ) lowerCAmelCase__ = crop_size if crop_size is not None else {'height': 2_24, 'width': 2_24} lowerCAmelCase__ = get_size_dict(_UpperCamelCase ) lowerCAmelCase__ = do_resize lowerCAmelCase__ = size lowerCAmelCase__ = resample lowerCAmelCase__ = do_center_crop lowerCAmelCase__ = crop_size lowerCAmelCase__ = do_rescale lowerCAmelCase__ = rescale_factor lowerCAmelCase__ = do_normalize lowerCAmelCase__ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN lowerCAmelCase__ = image_std if image_std is not None else IMAGENET_STANDARD_STD def UpperCamelCase__ ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = PILImageResampling.BICUBIC , _UpperCamelCase = None , **_UpperCamelCase , ): """simple docstring""" lowerCAmelCase__ = get_size_dict(_UpperCamelCase , 
default_to_square=_UpperCamelCase ) if "shortest_edge" not in size: raise ValueError(F"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}" ) lowerCAmelCase__ = get_resize_output_image_size(_UpperCamelCase , size=size['shortest_edge'] , default_to_square=_UpperCamelCase ) return resize(_UpperCamelCase , size=_UpperCamelCase , resample=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase ) def UpperCamelCase__ ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = None , **_UpperCamelCase , ): """simple docstring""" lowerCAmelCase__ = get_size_dict(_UpperCamelCase ) return center_crop(_UpperCamelCase , size=(size['height'], size['width']) , data_format=_UpperCamelCase , **_UpperCamelCase ) def UpperCamelCase__ ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = None , **_UpperCamelCase ): """simple docstring""" return rescale(_UpperCamelCase , scale=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase ) def UpperCamelCase__ ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = None , **_UpperCamelCase , ): """simple docstring""" return normalize(_UpperCamelCase , mean=_UpperCamelCase , std=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase ) def UpperCamelCase__ ( self , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = ChannelDimension.FIRST , **_UpperCamelCase , ): """simple docstring""" lowerCAmelCase__ = do_resize if do_resize is not None else self.do_resize lowerCAmelCase__ = size if size is not None else self.size lowerCAmelCase__ = get_size_dict(_UpperCamelCase , default_to_square=_UpperCamelCase ) lowerCAmelCase__ = resample if resample is not None else self.resample lowerCAmelCase__ = 
do_center_crop if do_center_crop is not None else self.do_center_crop lowerCAmelCase__ = crop_size if crop_size is not None else self.crop_size lowerCAmelCase__ = get_size_dict(_UpperCamelCase ) lowerCAmelCase__ = do_rescale if do_rescale is not None else self.do_rescale lowerCAmelCase__ = rescale_factor if rescale_factor is not None else self.rescale_factor lowerCAmelCase__ = do_normalize if do_normalize is not None else self.do_normalize lowerCAmelCase__ = image_mean if image_mean is not None else self.image_mean lowerCAmelCase__ = image_std if image_std is not None else self.image_std lowerCAmelCase__ = make_list_of_images(_UpperCamelCase ) if not valid_images(_UpperCamelCase ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_resize and size is None: raise ValueError('Size must be specified if do_resize is True.' ) if do_center_crop and crop_size is None: raise ValueError('Crop size must be specified if do_center_crop is True.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.' ) # All transformations expect numpy arrays. 
lowerCAmelCase__ = [to_numpy_array(_UpperCamelCase ) for image in images] if do_resize: lowerCAmelCase__ = [self.resize(image=_UpperCamelCase , size=_UpperCamelCase , resample=_UpperCamelCase ) for image in images] if do_center_crop: lowerCAmelCase__ = [self.center_crop(image=_UpperCamelCase , size=_UpperCamelCase ) for image in images] if do_rescale: lowerCAmelCase__ = [self.rescale(image=_UpperCamelCase , scale=_UpperCamelCase ) for image in images] if do_normalize: lowerCAmelCase__ = [self.normalize(image=_UpperCamelCase , mean=_UpperCamelCase , std=_UpperCamelCase ) for image in images] lowerCAmelCase__ = [to_channel_dimension_format(_UpperCamelCase , _UpperCamelCase ) for image in images] lowerCAmelCase__ = {'pixel_values': images} return BatchFeature(data=_UpperCamelCase , tensor_type=_UpperCamelCase )
122
1
import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from transformers.generation import DisjunctiveConstraint @require_torch class lowercase_ ( unittest.TestCase ): def lowerCamelCase_ ( self ): """simple docstring""" UpperCamelCase_ = [[1, 2, 4], [1, 2, 3, 4]] UpperCamelCase_ = DisjunctiveConstraint(_A ) self.assertTrue(isinstance(dc.token_ids , _A ) ) with self.assertRaises(_A ): DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) ) with self.assertRaises(_A ): DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] ) def lowerCamelCase_ ( self ): """simple docstring""" UpperCamelCase_ = [[1, 2], [1, 2, 3, 4]] with self.assertRaises(_A ): DisjunctiveConstraint(_A ) # fails here def lowerCamelCase_ ( self ): """simple docstring""" UpperCamelCase_ = [[1, 2, 3], [1, 2, 4]] UpperCamelCase_ = DisjunctiveConstraint(_A ) UpperCamelCase_ = dc.update(1 ) UpperCamelCase_ = stepped is True and completed is False and reset is False self.assertTrue(_A ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) UpperCamelCase_ = dc.update(2 ) UpperCamelCase_ = stepped is True and completed is False and reset is False self.assertTrue(_A ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) UpperCamelCase_ = dc.update(3 ) UpperCamelCase_ = stepped is True and completed is True and reset is False self.assertTrue(_A ) self.assertTrue(dc.completed ) # Completed! 
self.assertTrue(dc.current_seq == [1, 2, 3] ) def lowerCamelCase_ ( self ): """simple docstring""" UpperCamelCase_ = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]] UpperCamelCase_ = DisjunctiveConstraint(_A ) UpperCamelCase_ = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) UpperCamelCase_ = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) UpperCamelCase_ = dc.update(4 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2, 4] ) UpperCamelCase_ = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.current_seq == [1, 2, 4, 5] ) dc.reset() UpperCamelCase_ = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 3 ) self.assertTrue(dc.current_seq == [1] ) UpperCamelCase_ = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 2 ) self.assertTrue(dc.current_seq == [1, 2] ) UpperCamelCase_ = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.remaining() == 0 ) self.assertTrue(dc.current_seq == [1, 2, 5] )
122
'''simple docstring''' import random import unittest import torch from diffusers import IFInpaintingPipeline from diffusers.utils import floats_tensor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import skip_mps, torch_device from ..pipeline_params import ( TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin from . import IFPipelineTesterMixin @skip_mps class _lowerCamelCase ( lowercase__ , lowercase__ , unittest.TestCase ): '''simple docstring''' A_ : List[Any] = IFInpaintingPipeline A_ : int = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"""width""", """height"""} A_ : Any = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS A_ : Union[str, Any] = PipelineTesterMixin.required_optional_params - {"""latents"""} def __lowerCAmelCase ( self : Tuple ) -> Union[str, Any]: return self._get_dummy_components() def __lowerCAmelCase ( self : Optional[int] , _A : Dict , _A : Optional[int]=0 ) -> List[Any]: if str(_A ).startswith('mps' ): __magic_name__ : Optional[Any] = torch.manual_seed(_A ) else: __magic_name__ : Tuple = torch.Generator(device=_A ).manual_seed(_A ) __magic_name__ : List[str] = floats_tensor((1, 3, 32, 32) , rng=random.Random(_A ) ).to(_A ) __magic_name__ : Optional[int] = floats_tensor((1, 3, 32, 32) , rng=random.Random(_A ) ).to(_A ) __magic_name__ : Tuple = { 'prompt': 'A painting of a squirrel eating a burger', 'image': image, 'mask_image': mask_image, 'generator': generator, 'num_inference_steps': 2, 'output_type': 'numpy', } return inputs @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , ) def __lowerCAmelCase ( self : List[Any] ) -> int: self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) def __lowerCAmelCase ( self : Optional[Any] ) -> List[Any]: self._test_save_load_optional_components() 
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' ) def __lowerCAmelCase ( self : Dict ) -> Any: # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder super().test_save_load_floataa(expected_max_diff=1E-1 ) def __lowerCAmelCase ( self : Tuple ) -> int: self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 ) def __lowerCAmelCase ( self : Optional[int] ) -> List[str]: self._test_save_load_local() def __lowerCAmelCase ( self : Any ) -> int: self._test_inference_batch_single_identical( expected_max_diff=1E-2 , )
331
0
"""simple docstring""" from typing import Optional import numpy as np import torch from torch import nn from transformers import GPTaConfig, GPTaLMHeadModel from transformers.modeling_utils import ModuleUtilsMixin from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin class A__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): '''simple docstring''' SCREAMING_SNAKE_CASE = [R'h\.\d+\.attn\.bias', R'h\.\d+\.attn\.masked_bias'] @register_to_config def __init__( self: int , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: Optional[int] = None , _SCREAMING_SNAKE_CASE: int = 5_0257 , _SCREAMING_SNAKE_CASE: int = 1024 , _SCREAMING_SNAKE_CASE: int = 768 , _SCREAMING_SNAKE_CASE: int = 12 , _SCREAMING_SNAKE_CASE: int = 12 , _SCREAMING_SNAKE_CASE: Optional[int] = None , _SCREAMING_SNAKE_CASE: str = "gelu_new" , _SCREAMING_SNAKE_CASE: float = 0.1 , _SCREAMING_SNAKE_CASE: float = 0.1 , _SCREAMING_SNAKE_CASE: float = 0.1 , _SCREAMING_SNAKE_CASE: float = 1e-5 , _SCREAMING_SNAKE_CASE: float = 0.02 , _SCREAMING_SNAKE_CASE: bool = True , _SCREAMING_SNAKE_CASE: bool = True , _SCREAMING_SNAKE_CASE: bool = False , _SCREAMING_SNAKE_CASE: bool = False , ) -> str: """simple docstring""" super().__init__() __lowerCAmelCase : str = prefix_length if prefix_inner_dim != n_embd and prefix_hidden_dim is None: raise ValueError( F"""`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and""" F""" `n_embd`: {n_embd} are not equal.""") __lowerCAmelCase : str = prefix_inner_dim __lowerCAmelCase : str = prefix_hidden_dim __lowerCAmelCase : List[str] = ( nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim) if self.prefix_hidden_dim is not None else nn.Identity() ) __lowerCAmelCase : Dict = ( nn.Linear(self.prefix_hidden_dim , __lowerCAmelCase) if self.prefix_hidden_dim is not None else nn.Identity() ) __lowerCAmelCase : int = GPTaConfig( vocab_size=__lowerCAmelCase , 
n_positions=__lowerCAmelCase , n_embd=__lowerCAmelCase , n_layer=__lowerCAmelCase , n_head=__lowerCAmelCase , n_inner=__lowerCAmelCase , activation_function=__lowerCAmelCase , resid_pdrop=__lowerCAmelCase , embd_pdrop=__lowerCAmelCase , attn_pdrop=__lowerCAmelCase , layer_norm_epsilon=__lowerCAmelCase , initializer_range=__lowerCAmelCase , scale_attn_weights=__lowerCAmelCase , use_cache=__lowerCAmelCase , scale_attn_by_inverse_layer_idx=__lowerCAmelCase , reorder_and_upcast_attn=__lowerCAmelCase , ) __lowerCAmelCase : Union[str, Any] = GPTaLMHeadModel(__lowerCAmelCase) def _SCREAMING_SNAKE_CASE ( self: str , _SCREAMING_SNAKE_CASE: torch.Tensor , _SCREAMING_SNAKE_CASE: torch.Tensor , _SCREAMING_SNAKE_CASE: Optional[torch.Tensor] = None , _SCREAMING_SNAKE_CASE: Optional[torch.Tensor] = None , ) -> Union[str, Any]: """simple docstring""" __lowerCAmelCase : Optional[int] = self.transformer.transformer.wte(__lowerCAmelCase) __lowerCAmelCase : Tuple = self.encode_prefix(__lowerCAmelCase) __lowerCAmelCase : Any = self.decode_prefix(__lowerCAmelCase) __lowerCAmelCase : Any = torch.cat((prefix_embeds, embedding_text) , dim=1) if labels is not None: __lowerCAmelCase : Any = self.get_dummy_token(input_ids.shape[0] , input_ids.device) __lowerCAmelCase : List[Any] = torch.cat((dummy_token, input_ids) , dim=1) __lowerCAmelCase : str = self.transformer(inputs_embeds=__lowerCAmelCase , labels=__lowerCAmelCase , attention_mask=__lowerCAmelCase) if self.prefix_hidden_dim is not None: return out, hidden else: return out def _SCREAMING_SNAKE_CASE ( self: Optional[int] , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: torch.device) -> Union[str, Any]: """simple docstring""" return torch.zeros(__lowerCAmelCase , self.prefix_length , dtype=torch.intaa , device=__lowerCAmelCase) def _SCREAMING_SNAKE_CASE ( self: Union[str, Any] , _SCREAMING_SNAKE_CASE: Optional[Any]) -> Union[str, Any]: """simple docstring""" return self.encode_prefix(__lowerCAmelCase) @torch.no_grad() def 
_SCREAMING_SNAKE_CASE ( self: int , _SCREAMING_SNAKE_CASE: Any , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: List[Any]) -> Dict: """simple docstring""" __lowerCAmelCase : int = torch.split(__lowerCAmelCase , 1 , dim=0) __lowerCAmelCase : int = [] __lowerCAmelCase : List[str] = [] for feature in features: __lowerCAmelCase : int = self.decode_prefix(feature.to(__lowerCAmelCase)) # back to the clip feature # Only support beam search for now __lowerCAmelCase , __lowerCAmelCase : str = self.generate_beam( input_embeds=__lowerCAmelCase , device=__lowerCAmelCase , eos_token_id=__lowerCAmelCase) generated_tokens.append(output_tokens[0]) generated_seq_lengths.append(seq_lengths[0]) __lowerCAmelCase : str = torch.stack(__lowerCAmelCase) __lowerCAmelCase : str = torch.stack(__lowerCAmelCase) return generated_tokens, generated_seq_lengths @torch.no_grad() def _SCREAMING_SNAKE_CASE ( self: str , _SCREAMING_SNAKE_CASE: Tuple=None , _SCREAMING_SNAKE_CASE: int=None , _SCREAMING_SNAKE_CASE: Optional[int]=None , _SCREAMING_SNAKE_CASE: int = 5 , _SCREAMING_SNAKE_CASE: int = 67 , _SCREAMING_SNAKE_CASE: float = 1.0 , _SCREAMING_SNAKE_CASE: Optional[int] = None , ) -> List[Any]: """simple docstring""" __lowerCAmelCase : Optional[Any] = eos_token_id __lowerCAmelCase : List[str] = None __lowerCAmelCase : List[Any] = None __lowerCAmelCase : int = torch.ones(__lowerCAmelCase , device=__lowerCAmelCase , dtype=torch.int) __lowerCAmelCase : str = torch.zeros(__lowerCAmelCase , device=__lowerCAmelCase , dtype=torch.bool) if input_embeds is not None: __lowerCAmelCase : str = input_embeds else: __lowerCAmelCase : Optional[Any] = self.transformer.transformer.wte(__lowerCAmelCase) for i in range(__lowerCAmelCase): __lowerCAmelCase : List[str] = self.transformer(inputs_embeds=__lowerCAmelCase) __lowerCAmelCase : int = outputs.logits __lowerCAmelCase : Any = logits[:, -1, :] / (temperature if temperature > 0 else 1.0) __lowerCAmelCase : Optional[int] = logits.softmax(-1).log() if scores 
is None: __lowerCAmelCase , __lowerCAmelCase : str = logits.topk(__lowerCAmelCase , -1) __lowerCAmelCase : List[str] = generated.expand(__lowerCAmelCase , *generated.shape[1:]) __lowerCAmelCase , __lowerCAmelCase : int = next_tokens.permute(1 , 0), scores.squeeze(0) if tokens is None: __lowerCAmelCase : List[str] = next_tokens else: __lowerCAmelCase : Union[str, Any] = tokens.expand(__lowerCAmelCase , *tokens.shape[1:]) __lowerCAmelCase : Union[str, Any] = torch.cat((tokens, next_tokens) , dim=1) else: __lowerCAmelCase : Optional[int] = -float(np.inf) __lowerCAmelCase : List[Any] = 0 __lowerCAmelCase : str = scores[:, None] + logits seq_lengths[~is_stopped] += 1 __lowerCAmelCase : List[str] = scores_sum / seq_lengths[:, None] __lowerCAmelCase , __lowerCAmelCase : int = scores_sum_average.view(-1).topk(__lowerCAmelCase , -1) __lowerCAmelCase : Union[str, Any] = next_tokens // scores_sum.shape[1] __lowerCAmelCase : List[str] = seq_lengths[next_tokens_source] __lowerCAmelCase : Optional[int] = next_tokens % scores_sum.shape[1] __lowerCAmelCase : Tuple = next_tokens.unsqueeze(1) __lowerCAmelCase : str = tokens[next_tokens_source] __lowerCAmelCase : Tuple = torch.cat((tokens, next_tokens) , dim=1) __lowerCAmelCase : Optional[int] = generated[next_tokens_source] __lowerCAmelCase : Optional[int] = scores_sum_average * seq_lengths __lowerCAmelCase : List[Any] = is_stopped[next_tokens_source] __lowerCAmelCase : int = self.transformer.transformer.wte(next_tokens.squeeze()).view(generated.shape[0] , 1 , -1) __lowerCAmelCase : str = torch.cat((generated, next_token_embed) , dim=1) __lowerCAmelCase : str = is_stopped + next_tokens.eq(__lowerCAmelCase).squeeze() if is_stopped.all(): break __lowerCAmelCase : List[str] = scores / seq_lengths __lowerCAmelCase : Union[str, Any] = scores.argsort(descending=__lowerCAmelCase) # tokens tensors are already padded to max_seq_length __lowerCAmelCase : Optional[Any] = [tokens[i] for i in order] __lowerCAmelCase : str = 
torch.stack(__lowerCAmelCase , dim=0) __lowerCAmelCase : List[Any] = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype) return output_texts, seq_lengths
358
"""simple docstring""" import unittest from transformers import is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device if is_torch_available(): from transformers import AutoModelForSeqaSeqLM, AutoTokenizer @require_torch @require_sentencepiece @require_tokenizers class A__ ( unittest.TestCase ): '''simple docstring''' @slow def _SCREAMING_SNAKE_CASE ( self: Dict) -> Any: """simple docstring""" __lowerCAmelCase : Union[str, Any] = AutoModelForSeqaSeqLM.from_pretrained("google/mt5-small" , return_dict=_SCREAMING_SNAKE_CASE).to(_SCREAMING_SNAKE_CASE) __lowerCAmelCase : Optional[int] = AutoTokenizer.from_pretrained("google/mt5-small") __lowerCAmelCase : Tuple = tokenizer("Hello there" , return_tensors="pt").input_ids __lowerCAmelCase : List[str] = tokenizer("Hi I am" , return_tensors="pt").input_ids __lowerCAmelCase : List[str] = model(input_ids.to(_SCREAMING_SNAKE_CASE) , labels=labels.to(_SCREAMING_SNAKE_CASE)).loss __lowerCAmelCase : Optional[int] = -(labels.shape[-1] * loss.item()) __lowerCAmelCase : List[str] = -84.9127 self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
58
0
"""simple docstring""" import unittest from transformers import is_flax_available from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow if is_flax_available(): import optax from flax.training.common_utils import onehot from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration from transformers.models.ta.modeling_flax_ta import shift_tokens_right @require_torch @require_sentencepiece @require_tokenizers @require_flax class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" @slow def _lowercase ( self : str ): __lowercase = FlaxMTaForConditionalGeneration.from_pretrained("google/mt5-small" ) __lowercase = AutoTokenizer.from_pretrained("google/mt5-small" ) __lowercase = tokenizer("Hello there", return_tensors="np" ).input_ids __lowercase = tokenizer("Hi I am", return_tensors="np" ).input_ids __lowercase = shift_tokens_right(UpperCAmelCase__, model.config.pad_token_id, model.config.decoder_start_token_id ) __lowercase = model(UpperCAmelCase__, decoder_input_ids=UpperCAmelCase__ ).logits __lowercase = optax.softmax_cross_entropy(UpperCAmelCase__, onehot(UpperCAmelCase__, logits.shape[-1] ) ).mean() __lowercase = -(labels.shape[-1] * loss.item()) __lowercase = -84.9_127 self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
17
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor from .base import PipelineTool class _lowerCAmelCase ( lowercase ): """simple docstring""" __UpperCAmelCase : Tuple = "openai/whisper-base" __UpperCAmelCase : Union[str, Any] = ( "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the " "transcribed text." ) __UpperCAmelCase : List[str] = "transcriber" __UpperCAmelCase : Optional[Any] = WhisperProcessor __UpperCAmelCase : str = WhisperForConditionalGeneration __UpperCAmelCase : List[str] = ["audio"] __UpperCAmelCase : Tuple = ["text"] def _lowercase ( self : str, UpperCAmelCase__ : int ): return self.pre_processor(UpperCAmelCase__, return_tensors="pt" ).input_features def _lowercase ( self : Union[str, Any], UpperCAmelCase__ : Optional[Any] ): return self.model.generate(inputs=UpperCAmelCase__ ) def _lowercase ( self : Dict, UpperCAmelCase__ : Optional[int] ): return self.pre_processor.batch_decode(UpperCAmelCase__, skip_special_tokens=UpperCAmelCase__ )[0]
17
1
import os import numpy import onnx def UpperCamelCase_( snake_case__: str , snake_case__: Dict ) -> str: UpperCAmelCase__ = a.name UpperCAmelCase__ = b.name UpperCAmelCase__ = '' UpperCAmelCase__ = '' UpperCAmelCase__ = a == b UpperCAmelCase__ = name_a UpperCAmelCase__ = name_b return res def UpperCamelCase_( snake_case__: List[str] , snake_case__: List[Any] , snake_case__: Dict ) -> Optional[int]: for i, input_name in enumerate(node_proto.input ): if input_name == name: node_proto.input.insert(snake_case__ , snake_case__ ) node_proto.input.pop(i + 1 ) if node_proto.op_type == "If": _graph_replace_input_with(node_proto.attribute[0].g , snake_case__ , snake_case__ ) _graph_replace_input_with(node_proto.attribute[1].g , snake_case__ , snake_case__ ) if node_proto.op_type == "Loop": _graph_replace_input_with(node_proto.attribute[0].g , snake_case__ , snake_case__ ) def UpperCamelCase_( snake_case__: str , snake_case__: Any , snake_case__: Optional[Any] ) -> str: for n in graph_proto.node: _node_replace_input_with(snake_case__ , snake_case__ , snake_case__ ) def UpperCamelCase_( snake_case__: Union[str, Any] , snake_case__: Optional[int] , snake_case__: Optional[Any] ) -> List[Any]: UpperCAmelCase__ = list(model.graph.initializer ) UpperCAmelCase__ = list(model_without_ext.graph.initializer ) for i, ref_i in ind_to_replace: assert inits_with_data[i].name == inits[i].name assert inits_with_data[ref_i].name == inits[ref_i].name assert i > ref_i UpperCAmelCase__ = inits[i].name UpperCAmelCase__ = inits[ref_i].name model_without_ext.graph.initializer.remove(inits[i] ) # for n in model.graph.node: _graph_replace_input_with(model_without_ext.graph , snake_case__ , snake_case__ ) def UpperCamelCase_( snake_case__: List[str] ) -> Optional[Any]: UpperCAmelCase__ = os.path.dirname(snake_case__ ) UpperCAmelCase__ = os.path.basename(snake_case__ ) UpperCAmelCase__ = onnx.load(os.path.join(snake_case__ , snake_case__ ) ) UpperCAmelCase__ = list(model.graph.initializer ) 
UpperCAmelCase__ = set() UpperCAmelCase__ = {} UpperCAmelCase__ = [] UpperCAmelCase__ = 0 for i in range(len(snake_case__ ) ): if i in dup_set: continue for j in range(i + 1 , len(snake_case__ ) ): if j in dup_set: continue if _is_equal_tensor_proto(inits[i] , inits[j] ): dup_set.add(snake_case__ ) dup_set.add(snake_case__ ) UpperCAmelCase__ = inits[j].data_type UpperCAmelCase__ = numpy.prod(inits[j].dims ) if dtype == 1: mem_size *= 4 elif dtype == 6: mem_size *= 4 elif dtype == 7 or dtype == 11: mem_size *= 8 else: print('unexpected data type: ' , snake_case__ ) total_reduced_size += mem_size UpperCAmelCase__ = inits[i].name UpperCAmelCase__ = inits[j].name if name_i in dup_map: dup_map[name_i].append(snake_case__ ) else: UpperCAmelCase__ = [name_j] ind_to_replace.append((j, i) ) print('total reduced size: ' , total_reduced_size / 10_24 / 10_24 / 10_24 , 'GB' ) UpperCAmelCase__ = sorted(snake_case__ ) _remove_dup_initializers_from_model(snake_case__ , snake_case__ , snake_case__ ) UpperCAmelCase__ = 'optimized_' + model_file_name UpperCAmelCase__ = os.path.join(snake_case__ , snake_case__ ) onnx.save(snake_case__ , snake_case__ ) return new_model
335
from ...configuration_utils import PretrainedConfig


TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/tapas-base-finetuned-sqa": (
        "https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json"
    ),
    "google/tapas-base-finetuned-wtq": (
        "https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json"
    ),
    "google/tapas-base-finetuned-wikisql-supervised": (
        "https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json"
    ),
    "google/tapas-base-finetuned-tabfact": (
        "https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json"
    ),
}


class TapasConfig(PretrainedConfig):
    """Configuration class for a TAPAS model.

    Stores BERT-style encoder hyperparameters plus the fine-tuning task
    hyperparameters (cell selection / aggregation) used by the TAPAS heads.
    All arguments are persisted as attributes of the same name.
    """

    model_type = "tapas"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        type_vocab_sizes=[3, 256, 256, 2, 256, 256, 10],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        positive_label_weight=10.0,
        num_aggregation_labels=0,
        aggregation_loss_weight=1.0,
        use_answer_as_supervision=None,
        answer_loss_importance=1.0,
        use_normalized_answer_loss=False,
        huber_loss_delta=None,
        temperature=1.0,
        aggregation_temperature=1.0,
        use_gumbel_for_cells=False,
        use_gumbel_for_aggregation=False,
        average_approximation_function="ratio",
        cell_selection_preference=None,
        answer_loss_cutoff=None,
        max_num_rows=64,
        max_num_columns=32,
        average_logits_per_cell=False,
        select_one_column=True,
        allow_empty_column_selection=False,
        init_cell_selection_weights_to_zero=False,
        reset_position_index_per_cell=True,
        disable_per_token_loss=False,
        aggregation_labels=None,
        no_aggregation_label_index=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_sizes = type_vocab_sizes
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        # Fine-tuning task hyperparameters
        self.positive_label_weight = positive_label_weight
        self.num_aggregation_labels = num_aggregation_labels
        self.aggregation_loss_weight = aggregation_loss_weight
        self.use_answer_as_supervision = use_answer_as_supervision
        self.answer_loss_importance = answer_loss_importance
        self.use_normalized_answer_loss = use_normalized_answer_loss
        self.huber_loss_delta = huber_loss_delta
        self.temperature = temperature
        self.aggregation_temperature = aggregation_temperature
        self.use_gumbel_for_cells = use_gumbel_for_cells
        self.use_gumbel_for_aggregation = use_gumbel_for_aggregation
        self.average_approximation_function = average_approximation_function
        self.cell_selection_preference = cell_selection_preference
        self.answer_loss_cutoff = answer_loss_cutoff
        self.max_num_rows = max_num_rows
        self.max_num_columns = max_num_columns
        self.average_logits_per_cell = average_logits_per_cell
        self.select_one_column = select_one_column
        self.allow_empty_column_selection = allow_empty_column_selection
        self.init_cell_selection_weights_to_zero = init_cell_selection_weights_to_zero
        self.reset_position_index_per_cell = reset_position_index_per_cell
        self.disable_per_token_loss = disable_per_token_loss

        # Aggregation hyperparameters
        self.aggregation_labels = aggregation_labels
        self.no_aggregation_label_index = no_aggregation_label_index

        # JSON round-trips turn int keys into strings; normalize them back.
        if isinstance(self.aggregation_labels, dict):
            self.aggregation_labels = {int(k): v for k, v in aggregation_labels.items()}
335
1
"""simple docstring""" from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional, Union from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast from ...utils import logging if TYPE_CHECKING: from ...feature_extraction_utils import FeatureExtractionMixin from ...tokenization_utils_base import PreTrainedTokenizerBase from ...utils import TensorType lowercase__ : Dict = logging.get_logger(__name__) lowercase__ : Any = { """openai/whisper-base""": """https://huggingface.co/openai/whisper-base/resolve/main/config.json""", } # fmt: off lowercase__ : List[Any] = [ 1, 2, 7, 8, 9, 1_0, 1_4, 2_5, 2_6, 2_7, 2_8, 2_9, 3_1, 5_8, 5_9, 6_0, 6_1, 6_2, 6_3, 9_0, 9_1, 9_2, 9_3, 3_5_7, 3_6_6, 4_3_8, 5_3_2, 6_8_5, 7_0_5, 7_9_6, 9_3_0, 1_0_5_8, 1_2_2_0, 1_2_6_7, 1_2_7_9, 1_3_0_3, 1_3_4_3, 1_3_7_7, 1_3_9_1, 1_6_3_5, 1_7_8_2, 1_8_7_5, 2_1_6_2, 2_3_6_1, 2_4_8_8, 3_4_6_7, 4_0_0_8, 4_2_1_1, 4_6_0_0, 4_8_0_8, 5_2_9_9, 5_8_5_5, 6_3_2_9, 7_2_0_3, 9_6_0_9, 9_9_5_9, 1_0_5_6_3, 1_0_7_8_6, 1_1_4_2_0, 1_1_7_0_9, 1_1_9_0_7, 1_3_1_6_3, 1_3_6_9_7, 1_3_7_0_0, 1_4_8_0_8, 1_5_3_0_6, 1_6_4_1_0, 1_6_7_9_1, 1_7_9_9_2, 1_9_2_0_3, 1_9_5_1_0, 2_0_7_2_4, 2_2_3_0_5, 2_2_9_3_5, 2_7_0_0_7, 3_0_1_0_9, 3_0_4_2_0, 3_3_4_0_9, 3_4_9_4_9, 4_0_2_8_3, 4_0_4_9_3, 4_0_5_4_9, 4_7_2_8_2, 4_9_1_4_6, 5_0_2_5_7, 5_0_3_5_9, 5_0_3_6_0, 5_0_3_6_1 ] lowercase__ : List[str] = [ 1, 2, 7, 8, 9, 1_0, 1_4, 2_5, 2_6, 2_7, 2_8, 2_9, 3_1, 5_8, 5_9, 6_0, 6_1, 6_2, 6_3, 9_0, 9_1, 9_2, 9_3, 3_5_9, 5_0_3, 5_2_2, 5_4_2, 8_7_3, 8_9_3, 9_0_2, 9_1_8, 9_2_2, 9_3_1, 1_3_5_0, 1_8_5_3, 1_9_8_2, 2_4_6_0, 2_6_2_7, 3_2_4_6, 3_2_5_3, 3_2_6_8, 3_5_3_6, 3_8_4_6, 3_9_6_1, 4_1_8_3, 4_6_6_7, 6_5_8_5, 6_6_4_7, 7_2_7_3, 9_0_6_1, 9_3_8_3, 1_0_4_2_8, 1_0_9_2_9, 1_1_9_3_8, 1_2_0_3_3, 1_2_3_3_1, 1_2_5_6_2, 1_3_7_9_3, 1_4_1_5_7, 1_4_6_3_5, 1_5_2_6_5, 1_5_6_1_8, 1_6_5_5_3, 1_6_6_0_4, 1_8_3_6_2, 1_8_9_5_6, 2_0_0_7_5, 2_1_6_7_5, 2_2_5_2_0, 2_6_1_3_0, 2_6_1_6_1, 2_6_4_3_5, 2_8_2_7_9, 
2_9_4_6_4, 3_1_6_5_0, 3_2_3_0_2, 3_2_4_7_0, 3_6_8_6_5, 4_2_8_6_3, 4_7_4_2_5, 4_9_8_7_0, 5_0_2_5_4, 5_0_2_5_8, 5_0_3_6_0, 5_0_3_6_1, 5_0_3_6_2 ] class UpperCamelCase__ ( lowercase_ ): """simple docstring""" _SCREAMING_SNAKE_CASE = """whisper""" _SCREAMING_SNAKE_CASE = ["""past_key_values"""] _SCREAMING_SNAKE_CASE = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""} def __init__( self : int , SCREAMING_SNAKE_CASE_ : List[Any]=5_1_8_6_5 , SCREAMING_SNAKE_CASE_ : str=8_0 , SCREAMING_SNAKE_CASE_ : Optional[Any]=6 , SCREAMING_SNAKE_CASE_ : Dict=4 , SCREAMING_SNAKE_CASE_ : Optional[int]=6 , SCREAMING_SNAKE_CASE_ : str=4 , SCREAMING_SNAKE_CASE_ : Optional[int]=1_5_3_6 , SCREAMING_SNAKE_CASE_ : Optional[int]=1_5_3_6 , SCREAMING_SNAKE_CASE_ : Any=0.0 , SCREAMING_SNAKE_CASE_ : List[str]=0.0 , SCREAMING_SNAKE_CASE_ : Optional[int]=5_0_2_5_7 , SCREAMING_SNAKE_CASE_ : Dict=True , SCREAMING_SNAKE_CASE_ : Optional[int]=True , SCREAMING_SNAKE_CASE_ : str="gelu" , SCREAMING_SNAKE_CASE_ : str=2_5_6 , SCREAMING_SNAKE_CASE_ : Tuple=0.0 , SCREAMING_SNAKE_CASE_ : Optional[Any]=0.0 , SCREAMING_SNAKE_CASE_ : Any=0.0 , SCREAMING_SNAKE_CASE_ : int=0.02 , SCREAMING_SNAKE_CASE_ : Optional[Any]=False , SCREAMING_SNAKE_CASE_ : Union[str, Any]=1_5_0_0 , SCREAMING_SNAKE_CASE_ : Optional[int]=4_4_8 , SCREAMING_SNAKE_CASE_ : Tuple=5_0_2_5_6 , SCREAMING_SNAKE_CASE_ : Any=5_0_2_5_6 , SCREAMING_SNAKE_CASE_ : Optional[int]=5_0_2_5_6 , SCREAMING_SNAKE_CASE_ : Optional[Any]=None , SCREAMING_SNAKE_CASE_ : str=[2_2_0, 5_0_2_5_6] , SCREAMING_SNAKE_CASE_ : List[Any]=False , SCREAMING_SNAKE_CASE_ : Dict=2_5_6 , SCREAMING_SNAKE_CASE_ : str=False , SCREAMING_SNAKE_CASE_ : str=0.05 , SCREAMING_SNAKE_CASE_ : Tuple=1_0 , SCREAMING_SNAKE_CASE_ : Tuple=2 , SCREAMING_SNAKE_CASE_ : Optional[int]=0.0 , SCREAMING_SNAKE_CASE_ : Optional[int]=1_0 , SCREAMING_SNAKE_CASE_ : Any=0 , SCREAMING_SNAKE_CASE_ : Dict=7 , **SCREAMING_SNAKE_CASE_ : List[Any] , ): lowerCAmelCase_ : Dict = 
vocab_size lowerCAmelCase_ : Any = num_mel_bins lowerCAmelCase_ : Dict = d_model lowerCAmelCase_ : int = encoder_layers lowerCAmelCase_ : Optional[int] = encoder_attention_heads lowerCAmelCase_ : List[Any] = decoder_layers lowerCAmelCase_ : Optional[int] = decoder_attention_heads lowerCAmelCase_ : int = decoder_ffn_dim lowerCAmelCase_ : Any = encoder_ffn_dim lowerCAmelCase_ : Optional[Any] = dropout lowerCAmelCase_ : Dict = attention_dropout lowerCAmelCase_ : Dict = activation_dropout lowerCAmelCase_ : Union[str, Any] = activation_function lowerCAmelCase_ : int = init_std lowerCAmelCase_ : List[str] = encoder_layerdrop lowerCAmelCase_ : Union[str, Any] = decoder_layerdrop lowerCAmelCase_ : Optional[int] = use_cache lowerCAmelCase_ : List[str] = encoder_layers lowerCAmelCase_ : List[str] = scale_embedding # scale factor will be sqrt(d_model) if True lowerCAmelCase_ : int = max_source_positions lowerCAmelCase_ : List[str] = max_target_positions # Audio Classification-specific parameters. Feel free to ignore for other classes. 
lowerCAmelCase_ : Tuple = classifier_proj_size lowerCAmelCase_ : int = use_weighted_layer_sum # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 lowerCAmelCase_ : str = apply_spec_augment lowerCAmelCase_ : List[str] = mask_time_prob lowerCAmelCase_ : Union[str, Any] = mask_time_length lowerCAmelCase_ : Tuple = mask_time_min_masks lowerCAmelCase_ : Optional[Any] = mask_feature_prob lowerCAmelCase_ : Optional[int] = mask_feature_length lowerCAmelCase_ : int = mask_feature_min_masks lowerCAmelCase_ : Tuple = median_filter_width super().__init__( pad_token_id=SCREAMING_SNAKE_CASE_ , bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ , is_encoder_decoder=SCREAMING_SNAKE_CASE_ , decoder_start_token_id=SCREAMING_SNAKE_CASE_ , suppress_tokens=SCREAMING_SNAKE_CASE_ , begin_suppress_tokens=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , ) class UpperCamelCase__ ( lowercase_ ): """simple docstring""" @property def SCREAMING_SNAKE_CASE__ ( self : Any ): lowerCAmelCase_ : str = OrderedDict( [ ('input_features', {0: 'batch', 1: 'feature_size', 2: 'encoder_sequence'}), ] ) if self.use_past: lowerCAmelCase_ : Dict = {0: 'batch'} else: lowerCAmelCase_ : int = {0: 'batch', 1: 'decoder_sequence'} if self.use_past: self.fill_with_past_key_values_(SCREAMING_SNAKE_CASE_ , direction='inputs' ) return common_inputs def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , SCREAMING_SNAKE_CASE_ : int = -1 , SCREAMING_SNAKE_CASE_ : int = -1 , SCREAMING_SNAKE_CASE_ : bool = False , SCREAMING_SNAKE_CASE_ : Optional["TensorType"] = None , SCREAMING_SNAKE_CASE_ : int = 2_2_0_5_0 , SCREAMING_SNAKE_CASE_ : float = 5.0 , SCREAMING_SNAKE_CASE_ : int = 2_2_0 , ): lowerCAmelCase_ : Any = OrderedDict() lowerCAmelCase_ : Optional[int] = OnnxConfig.generate_dummy_inputs( self , preprocessor=preprocessor.feature_extractor , batch_size=SCREAMING_SNAKE_CASE_ , 
framework=SCREAMING_SNAKE_CASE_ , sampling_rate=SCREAMING_SNAKE_CASE_ , time_duration=SCREAMING_SNAKE_CASE_ , frequency=SCREAMING_SNAKE_CASE_ , ) lowerCAmelCase_ : List[Any] = encoder_inputs['input_features'].shape[2] lowerCAmelCase_ : str = encoder_sequence_length // 2 if self.use_past else seq_length lowerCAmelCase_ : Dict = super().generate_dummy_inputs( preprocessor.tokenizer , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) lowerCAmelCase_ : Any = encoder_inputs.pop('input_features' ) lowerCAmelCase_ : Optional[int] = decoder_inputs.pop('decoder_input_ids' ) if "past_key_values" in decoder_inputs: lowerCAmelCase_ : Optional[Any] = decoder_inputs.pop('past_key_values' ) return dummy_inputs @property def SCREAMING_SNAKE_CASE__ ( self : Dict ): return 1E-3
224
"""simple docstring""" import random def UpperCamelCase_ ( lowerCAmelCase__ : int , lowerCAmelCase__ : float , lowerCAmelCase__ : bool = False ) -> dict: """simple docstring""" lowerCAmelCase_ : dict = {i: [] for i in range(lowerCAmelCase__ )} # if probability is greater or equal than 1, then generate a complete graph if probability >= 1: return complete_graph(lowerCAmelCase__ ) # if probability is lower or equal than 0, then return a graph without edges if probability <= 0: return graph # for each couple of nodes, add an edge from u to v # if the number randomly generated is greater than probability probability for i in range(lowerCAmelCase__ ): for j in range(i + 1 , lowerCAmelCase__ ): if random.random() < probability: graph[i].append(lowerCAmelCase__ ) if not directed: # if the graph is undirected, add an edge in from j to i, either graph[j].append(lowerCAmelCase__ ) return graph def UpperCamelCase_ ( lowerCAmelCase__ : int ) -> dict: """simple docstring""" return { i: [j for j in range(lowerCAmelCase__ ) if i != j] for i in range(lowerCAmelCase__ ) } if __name__ == "__main__": import doctest doctest.testmod()
224
1
"""simple docstring""" import string def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> str: '''simple docstring''' lowercase_ = '''''' for i in sequence: lowercase_ = ord(__UpperCAmelCase ) if 65 <= extract <= 90: output += chr(1_55 - extract ) elif 97 <= extract <= 1_22: output += chr(2_19 - extract ) else: output += i return output def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> str: '''simple docstring''' lowercase_ = string.ascii_letters lowercase_ = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1] return "".join( letters_reversed[letters.index(__UpperCAmelCase )] if c in letters else c for c in sequence ) def _SCREAMING_SNAKE_CASE () -> None: '''simple docstring''' from timeit import timeit print("""Running performance benchmarks...""" ) lowercase_ = '''from string import printable ; from __main__ import atbash, atbash_slow''' print(F'''> atbash_slow(): {timeit("atbash_slow(printable)" , setup=__UpperCAmelCase )} seconds''' ) print(F'''> atbash(): {timeit("atbash(printable)" , setup=__UpperCAmelCase )} seconds''' ) if __name__ == "__main__": for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"): print(F"{example} encrypted in atbash: {atbash(example)}") benchmark()
352
"""simple docstring""" import json from typing import Dict, List, Optional, Tuple, Union from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import PaddingStrategy, logging from .tokenization_led import LEDTokenizer UpperCAmelCase : Any = logging.get_logger(__name__) UpperCAmelCase : Dict = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"} UpperCAmelCase : List[Any] = { "vocab_file": { "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json", }, "merges_file": { "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt", }, "tokenizer_file": { "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json", }, } UpperCAmelCase : Union[str, Any] = { "allenai/led-base-16384": 1_6384, } class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ): lowercase__ = VOCAB_FILES_NAMES lowercase__ = PRETRAINED_VOCAB_FILES_MAP lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase__ = LEDTokenizer lowercase__ = ["input_ids", "attention_mask"] def __init__( self : Dict , lowerCAmelCase_ : List[Any]=None , lowerCAmelCase_ : int=None , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : List[Any]="replace" , lowerCAmelCase_ : Dict="<s>" , lowerCAmelCase_ : Union[str, Any]="</s>" , lowerCAmelCase_ : List[Any]="</s>" , lowerCAmelCase_ : Optional[Any]="<s>" , lowerCAmelCase_ : Union[str, Any]="<unk>" , lowerCAmelCase_ : List[str]="<pad>" , lowerCAmelCase_ : Dict="<mask>" , lowerCAmelCase_ : Optional[Any]=False , lowerCAmelCase_ : List[Any]=True , **lowerCAmelCase_ : Optional[Any] , ): """simple docstring""" super().__init__( lowerCAmelCase_ , lowerCAmelCase_ , tokenizer_file=lowerCAmelCase_ , errors=lowerCAmelCase_ , bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , 
sep_token=lowerCAmelCase_ , cls_token=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , mask_token=lowerCAmelCase_ , add_prefix_space=lowerCAmelCase_ , trim_offsets=lowerCAmelCase_ , **lowerCAmelCase_ , ) lowercase_ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__()) if pre_tok_state.get("""add_prefix_space""" , lowerCAmelCase_) != add_prefix_space: lowercase_ = getattr(lowerCAmelCase_ , pre_tok_state.pop("""type""")) lowercase_ = add_prefix_space lowercase_ = pre_tok_class(**lowerCAmelCase_) lowercase_ = add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` lowercase_ = """post_processor""" lowercase_ = getattr(self.backend_tokenizer , lowerCAmelCase_ , lowerCAmelCase_) if tokenizer_component_instance: lowercase_ = json.loads(tokenizer_component_instance.__getstate__()) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: lowercase_ = tuple(state["""sep"""]) if "cls" in state: lowercase_ = tuple(state["""cls"""]) lowercase_ = False if state.get("""add_prefix_space""" , lowerCAmelCase_) != add_prefix_space: lowercase_ = add_prefix_space lowercase_ = True if state.get("""trim_offsets""" , lowerCAmelCase_) != trim_offsets: lowercase_ = trim_offsets lowercase_ = True if changes_to_apply: lowercase_ = getattr(lowerCAmelCase_ , state.pop("""type""")) lowercase_ = component_class(**lowerCAmelCase_) setattr(self.backend_tokenizer , lowerCAmelCase_ , lowerCAmelCase_) @property # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED def _UpperCAmelCase ( self : List[str]): """simple docstring""" if self._mask_token is None: if self.verbose: logger.error("""Using mask_token, but it is not set yet.""") return None return str(self._mask_token) @mask_token.setter def _UpperCAmelCase ( self : str , lowerCAmelCase_ : str): """simple docstring""" lowercase_ = AddedToken(lowerCAmelCase_ , 
lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_) if isinstance(lowerCAmelCase_ , lowerCAmelCase_) else value lowercase_ = value def _UpperCAmelCase ( self : Dict , *lowerCAmelCase_ : Union[str, Any] , **lowerCAmelCase_ : List[Any]): """simple docstring""" lowercase_ = kwargs.get("""is_split_into_words""" , lowerCAmelCase_) if is_split_into_words and not self.add_prefix_space: raise ValueError( F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' """to use it with pretokenized inputs.""") return super()._batch_encode_plus(*lowerCAmelCase_ , **lowerCAmelCase_) def _UpperCAmelCase ( self : Union[str, Any] , *lowerCAmelCase_ : Optional[int] , **lowerCAmelCase_ : Any): """simple docstring""" lowercase_ = kwargs.get("""is_split_into_words""" , lowerCAmelCase_) if is_split_into_words and not self.add_prefix_space: raise ValueError( F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' """to use it with pretokenized inputs.""") return super()._encode_plus(*lowerCAmelCase_ , **lowerCAmelCase_) def _UpperCAmelCase ( self : int , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[str] = None): """simple docstring""" lowercase_ = self._tokenizer.model.save(lowerCAmelCase_ , name=lowerCAmelCase_) return tuple(lowerCAmelCase_) def _UpperCAmelCase ( self : List[str] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Union[str, Any]=None): """simple docstring""" lowercase_ = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def _UpperCAmelCase ( self : List[str] , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None): """simple docstring""" lowercase_ = [self.sep_token_id] lowercase_ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0] def _UpperCAmelCase ( self : Optional[Any] , 
lowerCAmelCase_ : Union[Dict[str, EncodedInput], BatchEncoding] , lowerCAmelCase_ : Optional[int] = None , lowerCAmelCase_ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , lowerCAmelCase_ : Optional[int] = None , lowerCAmelCase_ : Optional[bool] = None , ): """simple docstring""" lowercase_ = super()._pad( encoded_inputs=lowerCAmelCase_ , max_length=lowerCAmelCase_ , padding_strategy=lowerCAmelCase_ , pad_to_multiple_of=lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , ) # Load from model defaults if return_attention_mask is None: lowercase_ = """attention_mask""" in self.model_input_names if return_attention_mask and "global_attention_mask" in encoded_inputs: lowercase_ = encoded_inputs[self.model_input_names[0]] # `global_attention_mask` need to have the same length as other (sequential) inputs. lowercase_ = len(encoded_inputs["""global_attention_mask"""]) != len(lowerCAmelCase_) if needs_to_be_padded: lowercase_ = len(lowerCAmelCase_) - len(encoded_inputs["""global_attention_mask"""]) if self.padding_side == "right": # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend` lowercase_ = ( encoded_inputs["""global_attention_mask"""] + [-1] * difference ) elif self.padding_side == "left": lowercase_ = [-1] * difference + encoded_inputs[ """global_attention_mask""" ] else: raise ValueError("""Invalid padding strategy:""" + str(self.padding_side)) return encoded_inputs
313
0
from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class A_ ( _a ): '''simple docstring''' a__ = ["image_processor", "tokenizer"] a__ = "AutoImageProcessor" a__ = "AutoTokenizer" def __init__(self , lowercase__ , lowercase__ ) -> Tuple: super().__init__(lowercase__ , lowercase__ ) __UpperCAmelCase = self.image_processor def __call__(self , lowercase__=None , lowercase__=None , lowercase__=None , **lowercase__ ) -> Optional[int]: if text is None and images is None: raise ValueError('''You have to specify either text or images. Both cannot be none.''' ) if text is not None: __UpperCAmelCase = self.tokenizer(lowercase__ , return_tensors=lowercase__ , **lowercase__ ) if images is not None: __UpperCAmelCase = self.image_processor(lowercase__ , return_tensors=lowercase__ , **lowercase__ ) if text is not None and images is not None: __UpperCAmelCase = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**lowercase__ ) , tensor_type=lowercase__ ) def lowerCAmelCase_ (self , *lowercase__ , **lowercase__ ) -> Dict: return self.tokenizer.batch_decode(*lowercase__ , **lowercase__ ) def lowerCAmelCase_ (self , *lowercase__ , **lowercase__ ) -> Optional[Any]: return self.tokenizer.decode(*lowercase__ , **lowercase__ ) @property def lowerCAmelCase_ (self ) -> str: return ["input_ids", "attention_mask", "pixel_values"]
333
# Longformer tokenizer test suite (slow + fast tokenizers).
# NOTE(review): this sample is machine-obfuscated and kept byte-identical below —
# every local was collapsed to `__UpperCAmelCase` and many references (e.g. the bare
# `lowercase__` inside setUp) are undefined as written; do not expect it to run as-is.
# Line: imports, class header with shadowed `a__` class attrs, setUp writing a toy
# BPE vocab/merges fixture to tmpdirname, and the slow/fast tokenizer factory helpers.
import itertools import json import os import unittest from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class A_ ( _a , unittest.TestCase ): '''simple docstring''' a__ = LongformerTokenizer a__ = True a__ = LongformerTokenizerFast a__ = True def lowerCAmelCase_ (self ) -> Any: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt __UpperCAmelCase = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''\u0120''', '''\u0120l''', '''\u0120n''', '''\u0120lo''', '''\u0120low''', '''er''', '''\u0120lowest''', '''\u0120newer''', '''\u0120wider''', '''<unk>''', ] __UpperCAmelCase = dict(zip(lowercase__ , range(len(lowercase__ ) ) ) ) __UpperCAmelCase = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', ''''''] __UpperCAmelCase = {'''unk_token''': '''<unk>'''} __UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) __UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(lowercase__ ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(lowercase__ ) ) def lowerCAmelCase_ (self , **lowercase__ ) -> int: kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowercase__ ) def lowerCAmelCase_ (self , **lowercase__ ) -> Tuple: kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **lowercase__ ) def lowerCAmelCase_ (self , lowercase__ ) -> Dict: __UpperCAmelCase = '''lower newer''' __UpperCAmelCase = 
# Line: get_input_output_texts tail, full-tokenizer round-trip test (tokenize +
# convert_tokens_to_ids against the toy vocab), and a @slow pretrained-checkpoint
# test exercising build_inputs_with_special_tokens on 'allenai/longformer-base-4096'.
'''lower newer''' return input_text, output_text def lowerCAmelCase_ (self ) -> Optional[Any]: __UpperCAmelCase = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map ) __UpperCAmelCase = '''lower newer''' __UpperCAmelCase = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er'''] __UpperCAmelCase = tokenizer.tokenize(lowercase__ ) # , add_prefix_space=True) self.assertListEqual(lowercase__ , lowercase__ ) __UpperCAmelCase = tokens + [tokenizer.unk_token] __UpperCAmelCase = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase__ ) , lowercase__ ) def lowerCAmelCase_ (self ) -> int: __UpperCAmelCase = self.get_tokenizer() self.assertListEqual(tokenizer.encode('''Hello world!''' , add_special_tokens=lowercase__ ) , [0, 31_414, 232, 328, 2] ) self.assertListEqual( tokenizer.encode('''Hello world! cécé herlolip 418''' , add_special_tokens=lowercase__ ) , [0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2] , ) @slow def lowerCAmelCase_ (self ) -> int: __UpperCAmelCase = self.tokenizer_class.from_pretrained('''allenai/longformer-base-4096''' ) __UpperCAmelCase = tokenizer.encode('''sequence builders''' , add_special_tokens=lowercase__ ) __UpperCAmelCase = tokenizer.encode('''multi-sequence build''' , add_special_tokens=lowercase__ ) __UpperCAmelCase = tokenizer.encode( '''sequence builders''' , add_special_tokens=lowercase__ , add_prefix_space=lowercase__ ) __UpperCAmelCase = tokenizer.encode( '''sequence builders''' , '''multi-sequence build''' , add_special_tokens=lowercase__ , add_prefix_space=lowercase__ ) __UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(lowercase__ ) __UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(lowercase__ , lowercase__ ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode def lowerCAmelCase_ (self ) -> Any: __UpperCAmelCase = self.get_tokenizer() 
# Line: prefix-space / special-token-space behavior tests, including adding a lstrip
# <mask> AddedToken and checking the token after <mask> keeps or drops its space.
__UpperCAmelCase = '''Encode this sequence.''' __UpperCAmelCase = tokenizer.byte_encoder[''' '''.encode('''utf-8''' )[0]] # Testing encoder arguments __UpperCAmelCase = tokenizer.encode(lowercase__ , add_special_tokens=lowercase__ , add_prefix_space=lowercase__ ) __UpperCAmelCase = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertNotEqual(lowercase__ , lowercase__ ) __UpperCAmelCase = tokenizer.encode(lowercase__ , add_special_tokens=lowercase__ , add_prefix_space=lowercase__ ) __UpperCAmelCase = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertEqual(lowercase__ , lowercase__ ) tokenizer.add_special_tokens({'''bos_token''': '''<s>'''} ) __UpperCAmelCase = tokenizer.encode(lowercase__ , add_special_tokens=lowercase__ ) __UpperCAmelCase = tokenizer.convert_ids_to_tokens(encoded[1] )[0] self.assertNotEqual(lowercase__ , lowercase__ ) # Testing spaces after special tokens __UpperCAmelCase = '''<mask>''' tokenizer.add_special_tokens( {'''mask_token''': AddedToken(lowercase__ , lstrip=lowercase__ , rstrip=lowercase__ )} ) # mask token has a left space __UpperCAmelCase = tokenizer.convert_tokens_to_ids(lowercase__ ) __UpperCAmelCase = '''Encode <mask> sequence''' __UpperCAmelCase = '''Encode <mask>sequence''' __UpperCAmelCase = tokenizer.encode(lowercase__ ) __UpperCAmelCase = encoded.index(lowercase__ ) __UpperCAmelCase = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertEqual(lowercase__ , lowercase__ ) __UpperCAmelCase = tokenizer.encode(lowercase__ ) __UpperCAmelCase = encoded.index(lowercase__ ) __UpperCAmelCase = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertNotEqual(lowercase__ , lowercase__ ) def lowerCAmelCase_ (self ) -> Tuple: pass def lowerCAmelCase_ (self ) -> int: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): __UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(lowercase__ , **lowercase__ ) 
# Line: Python-vs-Rust tokenizer parity test on a sentence containing <mask>
# (token_type_ids all zero, attention_mask all one, identical input_ids/tokens).
__UpperCAmelCase = self.tokenizer_class.from_pretrained(lowercase__ , **lowercase__ ) __UpperCAmelCase = '''A, <mask> AllenNLP sentence.''' __UpperCAmelCase = tokenizer_r.encode_plus(lowercase__ , add_special_tokens=lowercase__ , return_token_type_ids=lowercase__ ) __UpperCAmelCase = tokenizer_p.encode_plus(lowercase__ , add_special_tokens=lowercase__ , return_token_type_ids=lowercase__ ) # token_type_ids should put 0 everywhere self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) ) # attention_mask should put 1 everywhere, so sum over length should be 1 self.assertEqual( sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , ) __UpperCAmelCase = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] ) __UpperCAmelCase = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] ) # Rust correctly handles the space before the mask while python doesnt self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] ) self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] ) self.assertSequenceEqual( lowercase__ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] ) self.assertSequenceEqual( lowercase__ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] ) def lowerCAmelCase_ (self ) -> Optional[int]: for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ): __UpperCAmelCase = self.rust_tokenizer_class.from_pretrained( self.tmpdirname , use_fast=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ ) __UpperCAmelCase = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() ) __UpperCAmelCase = 
# Line: pre_tokenizer/post_processor serialized-state checks, then offset-mapping
# tests across the add_prefix_space x trim_offsets grid (first half).
json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() ) self.assertEqual(pre_tokenizer_state['''add_prefix_space'''] , lowercase__ ) self.assertEqual(post_processor_state['''add_prefix_space'''] , lowercase__ ) self.assertEqual(post_processor_state['''trim_offsets'''] , lowercase__ ) def lowerCAmelCase_ (self ) -> Union[str, Any]: # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and # `trim_offsets` for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): __UpperCAmelCase = '''hello''' # `hello` is a token in the vocabulary of `pretrained_name` __UpperCAmelCase = F'''{text_of_1_token} {text_of_1_token}''' __UpperCAmelCase = self.rust_tokenizer_class.from_pretrained( lowercase__ , use_fast=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ ) __UpperCAmelCase = tokenizer_r(lowercase__ , return_offsets_mapping=lowercase__ , add_special_tokens=lowercase__ ) self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase__ )) ) self.assertEqual( encoding.offset_mapping[1] , (len(lowercase__ ) + 1, len(lowercase__ ) + 1 + len(lowercase__ )) , ) __UpperCAmelCase = self.rust_tokenizer_class.from_pretrained( lowercase__ , use_fast=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ ) __UpperCAmelCase = tokenizer_r(lowercase__ , return_offsets_mapping=lowercase__ , add_special_tokens=lowercase__ ) self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase__ )) ) self.assertEqual( encoding.offset_mapping[1] , (len(lowercase__ ) + 1, len(lowercase__ ) + 1 + len(lowercase__ )) , ) __UpperCAmelCase = self.rust_tokenizer_class.from_pretrained( lowercase__ , use_fast=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ ) __UpperCAmelCase = tokenizer_r(lowercase__ , return_offsets_mapping=lowercase__ , add_special_tokens=lowercase__ ) 
# Line: offset-mapping grid continued, including the leading-space variant with a
# commented-out known-broken combination preserved from upstream.
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase__ )) ) self.assertEqual( encoding.offset_mapping[1] , (len(lowercase__ ), len(lowercase__ ) + 1 + len(lowercase__ )) , ) __UpperCAmelCase = self.rust_tokenizer_class.from_pretrained( lowercase__ , use_fast=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ ) __UpperCAmelCase = tokenizer_r(lowercase__ , return_offsets_mapping=lowercase__ , add_special_tokens=lowercase__ ) self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase__ )) ) self.assertEqual( encoding.offset_mapping[1] , (len(lowercase__ ), len(lowercase__ ) + 1 + len(lowercase__ )) , ) __UpperCAmelCase = F''' {text}''' # tokenizer_r = self.rust_tokenizer_class.from_pretrained( # pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True # ) # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token))) # self.assertEqual( # encoding.offset_mapping[1], # (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), # ) __UpperCAmelCase = self.rust_tokenizer_class.from_pretrained( lowercase__ , use_fast=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ ) __UpperCAmelCase = tokenizer_r(lowercase__ , return_offsets_mapping=lowercase__ , add_special_tokens=lowercase__ ) self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(lowercase__ )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(lowercase__ ) + 1, 1 + len(lowercase__ ) + 1 + len(lowercase__ )) , ) __UpperCAmelCase = self.rust_tokenizer_class.from_pretrained( lowercase__ , use_fast=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ ) __UpperCAmelCase = tokenizer_r(lowercase__ , return_offsets_mapping=lowercase__ , add_special_tokens=lowercase__ ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowercase__ )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + 
# Line: final trim_offsets=False leading-space assertions closing the test class.
len(lowercase__ ), 1 + len(lowercase__ ) + 1 + len(lowercase__ )) , ) __UpperCAmelCase = self.rust_tokenizer_class.from_pretrained( lowercase__ , use_fast=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ ) __UpperCAmelCase = tokenizer_r(lowercase__ , return_offsets_mapping=lowercase__ , add_special_tokens=lowercase__ ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowercase__ )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(lowercase__ ), 1 + len(lowercase__ ) + 1 + len(lowercase__ )) , )
333
1
"""simple docstring""" from __future__ import annotations def lowerCAmelCase__ ( _UpperCamelCase : Any , _UpperCamelCase : Optional[int] , _UpperCamelCase : List[Any] , ) -> tuple[str, float]: """simple docstring""" if (stress, tangential_force, area).count(0 ) != 1: raise ValueError('You cannot supply more or less than 2 values' ) elif stress < 0: raise ValueError('Stress cannot be negative' ) elif tangential_force < 0: raise ValueError('Tangential Force cannot be negative' ) elif area < 0: raise ValueError('Area cannot be negative' ) elif stress == 0: return ( "stress", tangential_force / area, ) elif tangential_force == 0: return ( "tangential_force", stress * area, ) else: return ( "area", tangential_force / stress, ) if __name__ == "__main__": import doctest doctest.testmod()
362
"""simple docstring""" import argparse import math import traceback import dateutil.parser as date_parser import requests def lowerCAmelCase__ ( _UpperCamelCase : Any ) -> int: """simple docstring""" snake_case = {} snake_case = job['started_at'] snake_case = job['completed_at'] snake_case = date_parser.parse(_UpperCamelCase ) snake_case = date_parser.parse(_UpperCamelCase ) snake_case = round((end_datetime - start_datetime).total_seconds() / 60.0 ) snake_case = start snake_case = end snake_case = duration_in_min return job_info def lowerCAmelCase__ ( _UpperCamelCase : Dict , _UpperCamelCase : Any=None ) -> Union[str, Any]: """simple docstring""" snake_case = None if token is not None: snake_case = {'Accept': 'application/vnd.github+json', 'Authorization': f"""Bearer {token}"""} snake_case = f"""https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100""" snake_case = requests.get(_UpperCamelCase , headers=_UpperCamelCase ).json() snake_case = {} try: job_time.update({job['name']: extract_time_from_single_job(_UpperCamelCase ) for job in result['jobs']} ) snake_case = math.ceil((result['total_count'] - 1_0_0) / 1_0_0 ) for i in range(_UpperCamelCase ): snake_case = requests.get(url + f"""&page={i + 2}""" , headers=_UpperCamelCase ).json() job_time.update({job['name']: extract_time_from_single_job(_UpperCamelCase ) for job in result['jobs']} ) return job_time except Exception: print(f"""Unknown error, could not fetch links:\n{traceback.format_exc()}""" ) return {} if __name__ == "__main__": SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser() # Required parameters parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.") SCREAMING_SNAKE_CASE__ = parser.parse_args() SCREAMING_SNAKE_CASE__ = get_job_time(args.workflow_run_id) SCREAMING_SNAKE_CASE__ = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True)) for k, v in job_time.items(): print(f"""{k}: 
{v['duration']}""")
149
0
# CodeGen tokenizer test suite (slow + fast tokenizers).
# NOTE(review): machine-obfuscated sample kept byte-identical below — locals were
# collapsed to `UpperCAmelCase__` so many later references would be undefined as
# written; treat this as reference text, not runnable code.
# Line: imports, class header, and setUp writing a toy BPE vocab/merges fixture.
'''simple docstring''' import json import os import re import unittest from transformers import CodeGenTokenizer, CodeGenTokenizerFast from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class lowerCamelCase_ ( __a , unittest.TestCase ): lowerCAmelCase__ = CodeGenTokenizer lowerCAmelCase__ = CodeGenTokenizerFast lowerCAmelCase__ = True lowerCAmelCase__ = {'add_prefix_space': True} lowerCAmelCase__ = False def lowercase_ ( self : List[Any] ): '''simple docstring''' super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt UpperCAmelCase__ : int = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''\u0120''', '''\u0120l''', '''\u0120n''', '''\u0120lo''', '''\u0120low''', '''er''', '''\u0120lowest''', '''\u0120newer''', '''\u0120wider''', '''<unk>''', '''<|endoftext|>''', ] UpperCAmelCase__ : Optional[Any] = dict(zip(_A , range(len(_A ) ) ) ) UpperCAmelCase__ : Any = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', ''''''] UpperCAmelCase__ : List[Any] = {'''unk_token''': '''<unk>'''} UpperCAmelCase__ : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) UpperCAmelCase__ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(_A ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(_A ) ) def lowercase_ ( self : Tuple , **_A : Dict ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return CodeGenTokenizer.from_pretrained(self.tmpdirname , **_A ) def lowercase_ ( self : List[str] , **_A : Union[str, Any] ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return 
# Line: fast-tokenizer factory, get_input_output_texts, full-tokenizer round-trip
# test, and the slow/rust parity test (tokenize + encode with/without specials).
CodeGenTokenizerFast.from_pretrained(self.tmpdirname , **_A ) def lowercase_ ( self : str , _A : Dict ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = '''lower newer''' UpperCAmelCase__ : int = '''lower newer''' return input_text, output_text def lowercase_ ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : List[str] = CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) UpperCAmelCase__ : Optional[int] = '''lower newer''' UpperCAmelCase__ : Optional[int] = ['''\u0120low''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er'''] UpperCAmelCase__ : Optional[int] = tokenizer.tokenize(_A , add_prefix_space=_A ) self.assertListEqual(_A , _A ) UpperCAmelCase__ : List[Any] = tokens + [tokenizer.unk_token] UpperCAmelCase__ : List[Any] = [14, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(_A ) , _A ) def lowercase_ ( self : int ): '''simple docstring''' if not self.test_rust_tokenizer: return UpperCAmelCase__ : Optional[Any] = self.get_tokenizer() UpperCAmelCase__ : List[str] = self.get_rust_tokenizer(add_prefix_space=_A ) UpperCAmelCase__ : Optional[int] = '''lower newer''' # Testing tokenization UpperCAmelCase__ : Any = tokenizer.tokenize(_A , add_prefix_space=_A ) UpperCAmelCase__ : Optional[Any] = rust_tokenizer.tokenize(_A ) self.assertListEqual(_A , _A ) # Testing conversion to ids without special tokens UpperCAmelCase__ : Any = tokenizer.encode(_A , add_special_tokens=_A , add_prefix_space=_A ) UpperCAmelCase__ : List[str] = rust_tokenizer.encode(_A , add_special_tokens=_A ) self.assertListEqual(_A , _A ) # Testing conversion to ids with special tokens UpperCAmelCase__ : Any = self.get_rust_tokenizer(add_prefix_space=_A ) UpperCAmelCase__ : Dict = tokenizer.encode(_A , add_prefix_space=_A ) UpperCAmelCase__ : Optional[int] = rust_tokenizer.encode(_A ) self.assertListEqual(_A , _A ) # Testing the unknown token UpperCAmelCase__ : Dict = tokens + [rust_tokenizer.unk_token] 
# Line: unknown-token ids check and padded batch_encode_plus error-path tests
# (max_length padding raises without a pad token, for simple and pair inputs).
UpperCAmelCase__ : List[Any] = [14, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(_A ) , _A ) def lowercase_ ( self : List[Any] , *_A : Tuple , **_A : Optional[Any] ): '''simple docstring''' pass def lowercase_ ( self : str , _A : Any=15 ): '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): UpperCAmelCase__ : str = self.rust_tokenizer_class.from_pretrained(_A , **_A ) # Simple input UpperCAmelCase__ : List[Any] = '''This is a simple input''' UpperCAmelCase__ : Tuple = ['''This is a simple input 1''', '''This is a simple input 2'''] UpperCAmelCase__ : Any = ('''This is a simple input''', '''This is a pair''') UpperCAmelCase__ : Optional[Any] = [ ('''This is a simple input 1''', '''This is a simple input 2'''), ('''This is a simple pair 1''', '''This is a simple pair 2'''), ] # Simple input tests self.assertRaises(_A , tokenizer_r.encode , _A , max_length=_A , padding='''max_length''' ) # Simple input self.assertRaises(_A , tokenizer_r.encode_plus , _A , max_length=_A , padding='''max_length''' ) # Simple input self.assertRaises( _A , tokenizer_r.batch_encode_plus , _A , max_length=_A , padding='''max_length''' , ) # Pair input self.assertRaises(_A , tokenizer_r.encode , _A , max_length=_A , padding='''max_length''' ) # Pair input self.assertRaises(_A , tokenizer_r.encode_plus , _A , max_length=_A , padding='''max_length''' ) # Pair input self.assertRaises( _A , tokenizer_r.batch_encode_plus , _A , max_length=_A , padding='''max_length''' , ) def lowercase_ ( self : int ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token='''<pad>''' ) # Simple input UpperCAmelCase__ : Any = '''This is a simple input''' UpperCAmelCase__ : Any = ['''This is a simple input looooooooong''', '''This is a simple input'''] UpperCAmelCase__ : List[str] = ('''This is a 
# Line: padding-behavior assertions for single/pair inputs with a <pad> token
# (max_length padding, automatic longest-padding, attention_mask zeros).
simple input''', '''This is a pair''') UpperCAmelCase__ : Tuple = [ ('''This is a simple input loooooong''', '''This is a simple input'''), ('''This is a simple pair loooooong''', '''This is a simple pair'''), ] UpperCAmelCase__ : Any = tokenizer.pad_token_id UpperCAmelCase__ : List[Any] = tokenizer(_A , padding='''max_length''' , max_length=30 , return_tensors='''np''' ) UpperCAmelCase__ : List[Any] = tokenizer(_A , padding=_A , truncate=_A , return_tensors='''np''' ) UpperCAmelCase__ : List[str] = tokenizer(*_A , padding='''max_length''' , max_length=60 , return_tensors='''np''' ) UpperCAmelCase__ : Optional[int] = tokenizer(_A , padding=_A , truncate=_A , return_tensors='''np''' ) # s # test single string max_length padding self.assertEqual(out_s['''input_ids'''].shape[-1] , 30 ) self.assertTrue(pad_token_id in out_s['''input_ids'''] ) self.assertTrue(0 in out_s['''attention_mask'''] ) # s2 # test automatic padding self.assertEqual(out_sa['''input_ids'''].shape[-1] , 33 ) # long slice doesn't have padding self.assertFalse(pad_token_id in out_sa['''input_ids'''][0] ) self.assertFalse(0 in out_sa['''attention_mask'''][0] ) # short slice does have padding self.assertTrue(pad_token_id in out_sa['''input_ids'''][1] ) self.assertTrue(0 in out_sa['''attention_mask'''][1] ) # p # test single pair max_length padding self.assertEqual(out_p['''input_ids'''].shape[-1] , 60 ) self.assertTrue(pad_token_id in out_p['''input_ids'''] ) self.assertTrue(0 in out_p['''attention_mask'''] ) # p2 # test automatic padding pair self.assertEqual(out_pa['''input_ids'''].shape[-1] , 52 ) # long slice pair doesn't have padding self.assertFalse(pad_token_id in out_pa['''input_ids'''][0] ) self.assertFalse(0 in out_pa['''attention_mask'''][0] ) # short slice pair does have padding self.assertTrue(pad_token_id in out_pa['''input_ids'''][1] ) self.assertTrue(0 in out_pa['''attention_mask'''][1] ) def lowercase_ ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : Dict = '''$$$''' 
# Line: custom-bos-token test and @slow truncate_before_pattern decode test
# against the Salesforce/codegen-350M-mono checkpoint; trailing no-op test.
UpperCAmelCase__ : str = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=_A , add_bos_token=_A ) UpperCAmelCase__ : Any = '''This is a simple input''' UpperCAmelCase__ : List[str] = ['''This is a simple input 1''', '''This is a simple input 2'''] UpperCAmelCase__ : str = tokenizer.bos_token_id UpperCAmelCase__ : str = tokenizer(_A ) UpperCAmelCase__ : str = tokenizer(_A ) self.assertEqual(out_s.input_ids[0] , _A ) self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) ) UpperCAmelCase__ : List[Any] = tokenizer.decode(out_s.input_ids ) UpperCAmelCase__ : Optional[int] = tokenizer.batch_decode(out_sa.input_ids ) self.assertEqual(decode_s.split()[0] , _A ) self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) ) @slow def lowercase_ ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Dict = CodeGenTokenizer.from_pretrained('''Salesforce/codegen-350M-mono''' ) UpperCAmelCase__ : Optional[int] = '''\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#''' UpperCAmelCase__ : List[str] = '''\nif len_a > len_b: result = a\nelse: result = b''' UpperCAmelCase__ : int = tokenizer.encode(_A ) UpperCAmelCase__ : Optional[Any] = ['''^#''', re.escape('''<|endoftext|>''' ), '''^\'\'\'''', '''^"""''', '''\n\n\n'''] UpperCAmelCase__ : Optional[int] = tokenizer.decode(_A , truncate_before_pattern=_A ) self.assertEqual(_A , _A ) def lowercase_ ( self : int ): '''simple docstring''' pass
181
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple

import regex as re

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


# NOTE(review): the original was machine-obfuscated — every assignment target was
# renamed (UpperCamelCase__ / UpperCAmelCase__), leaving later references such as
# `bs`, `cs`, `n`, `pairs`, `first`, `second`, `i`, `j`, `logger`, and the
# VOCAB_FILES_NAMES / PRETRAINED_* constants undefined. Names are restored here
# from the surviving reference sites and the standard GPT-2/BART BPE layout.
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}

# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/vocab.json',
        'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/vocab.json',
        'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json',
        'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json',
        'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json',
        'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json',
    },
    'merges_file': {
        'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/merges.txt',
        'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/merges.txt',
        'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt',
        'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt',
        'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt',
        'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt',
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'facebook/bart-base': 1024,
    'facebook/bart-large': 1024,
    'facebook/bart-large-mnli': 1024,
    'facebook/bart-large-cnn': 1024,
    'facebook/bart-large-xsum': 1024,
    'yjernite/bart_eli5': 1024,
}


@lru_cache()
def bytes_to_unicode():
    """Return a byte -> printable-unicode-character mapping for byte-level BPE.

    Every one of the 256 byte values maps to a distinct printable character so
    that BPE can operate on text without control/whitespace tokens.
    """
    bs = (
        list(range(ord('!'), ord('~') + 1))
        + list(range(ord('¡'), ord('¬') + 1))
        + list(range(ord('®'), ord('ÿ') + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            # Map non-printable bytes to characters above the byte range.
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """Return the set of adjacent symbol pairs in `word` (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


class lowerCamelCase_(PreTrainedTokenizer):
    """Byte-level BPE tokenizer for BART (GPT-2 style vocab.json + merges.txt)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        """Load vocab/merges files and configure special tokens."""
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        with open(vocab_file, encoding='utf-8') as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding='utf-8') as merges_handle:
            bpe_merges = merges_handle.read().split('\n')[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")

    @property
    def vocab_size(self):
        """Size of the base vocabulary (without added tokens)."""
        return len(self.encoder)

    def get_vocab(self):
        """Return token -> id mapping including added tokens."""
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        """Apply byte-pair merges to `token`, returning space-joined BPE symbols."""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            # Lowest-rank (most frequent) mergeable pair first.
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            word = tuple(new_word)
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = ' '.join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Split `text` with the GPT-2 regex and BPE-encode each piece."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = ''.join(
                self.byte_encoder[b] for b in token.encode('utf-8')
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(' '))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        """Token string -> vocab id, falling back to the unk token's id."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Vocab id -> token string."""
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        """Join BPE tokens and decode the byte-level mapping back to text."""
        text = ''.join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors=self.errors)
        return text

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Write vocab.json and merges.txt into `save_directory`."""
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file']
        )
        with open(vocab_file, 'w', encoding='utf-8') as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + '\n')
        index = 0
        with open(merge_file, 'w', encoding='utf-8') as writer:
            writer.write('#version: 0.2\n')
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
                        ''' Please check that the tokenizer is not corrupted!'''
                    )
                    index = token_index
                writer.write(' '.join(bpe_tokens) + '\n')
                index += 1
        return vocab_file, merge_file

    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_a_pair=None):
        """<s> A </s> for one sequence; <s> A </s></s> B </s> for a pair."""
        if token_ids_a_pair is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a_pair + sep

    def get_special_tokens_mask(self, token_ids_a, token_ids_a_pair=None, already_has_special_tokens=False):
        """1 for special tokens, 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_a, token_ids_1=token_ids_a_pair, already_has_special_tokens=True
            )
        if token_ids_a_pair is None:
            return [1] + ([0] * len(token_ids_a)) + [1]
        return [1] + ([0] * len(token_ids_a)) + [1, 1] + ([0] * len(token_ids_a_pair)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_a, token_ids_a_pair=None):
        """BART does not use token type ids: everything is 0."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_a_pair is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a_pair + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        """Optionally prepend a space so the first word BPE-merges like mid-text words."""
        add_prefix_space = kwargs.pop('add_prefix_space', self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = ' ' + text
        return (text, kwargs)
181
1
"""simple docstring""" import argparse import torch from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging logging.set_verbosity_info() def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Optional[int]: # Construct model if gpta_config_file == "": a__: Union[str, Any] = GPTaConfig() else: a__: Tuple = GPTaConfig.from_json_file(_SCREAMING_SNAKE_CASE ) a__: int = GPTaModel(_SCREAMING_SNAKE_CASE ) # Load weights from numpy load_tf_weights_in_gpta(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Save pytorch-model a__: str = pytorch_dump_folder_path + '/' + WEIGHTS_NAME a__: Optional[Any] = pytorch_dump_folder_path + '/' + CONFIG_NAME print(F'Save PyTorch model to {pytorch_weights_dump_path}' ) torch.save(model.state_dict() , _SCREAMING_SNAKE_CASE ) print(F'Save configuration file to {pytorch_config_dump_path}' ) with open(_SCREAMING_SNAKE_CASE , 'w' , encoding='utf-8' ) as f: f.write(config.to_json_string() ) if __name__ == "__main__": lowercase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( '--gpt2_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.' ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) parser.add_argument( '--gpt2_config_file', default='', type=str, help=( 'An optional config json file corresponding to the pre-trained OpenAI model. \n' 'This specifies the model architecture.' ), ) lowercase__ = parser.parse_args() convert_gpta_checkpoint_to_pytorch(args.gpta_checkpoint_path, args.gpta_config_file, args.pytorch_dump_folder_path)
356
"""simple docstring""" import gc import inspect import unittest import torch from parameterized import parameterized from diffusers import PriorTransformer from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin enable_full_determinism() class __snake_case ( __lowerCAmelCase , unittest.TestCase ): a__ = PriorTransformer a__ = """hidden_states""" @property def lowerCamelCase_ ( self) -> Tuple: '''simple docstring''' a__: Union[str, Any] = 4 a__: Any = 8 a__: Optional[Any] = 7 a__: Tuple = floats_tensor((batch_size, embedding_dim)).to(lowercase) a__: Optional[int] = floats_tensor((batch_size, embedding_dim)).to(lowercase) a__: List[str] = floats_tensor((batch_size, num_embeddings, embedding_dim)).to(lowercase) return { "hidden_states": hidden_states, "timestep": 2, "proj_embedding": proj_embedding, "encoder_hidden_states": encoder_hidden_states, } def lowerCamelCase_ ( self , lowercase=0) -> str: '''simple docstring''' torch.manual_seed(lowercase) a__: Optional[Any] = 4 a__: Optional[Any] = 8 a__: Union[str, Any] = 7 a__: Optional[Any] = torch.randn((batch_size, embedding_dim)).to(lowercase) a__: List[str] = torch.randn((batch_size, embedding_dim)).to(lowercase) a__: Tuple = torch.randn((batch_size, num_embeddings, embedding_dim)).to(lowercase) return { "hidden_states": hidden_states, "timestep": 2, "proj_embedding": proj_embedding, "encoder_hidden_states": encoder_hidden_states, } @property def lowerCamelCase_ ( self) -> str: '''simple docstring''' return (4, 8) @property def lowerCamelCase_ ( self) -> Optional[int]: '''simple docstring''' return (4, 8) def lowerCamelCase_ ( self) -> str: '''simple docstring''' a__: int = { 'num_attention_heads': 2, 'attention_head_dim': 4, 'num_layers': 2, 'embedding_dim': 8, 'num_embeddings': 7, 'additional_embeddings': 4, } a__: Union[str, Any] = self.dummy_input return init_dict, inputs_dict 
def lowerCamelCase_ ( self) -> Union[str, Any]: '''simple docstring''' a__ , a__: Union[str, Any] = PriorTransformer.from_pretrained( 'hf-internal-testing/prior-dummy' , output_loading_info=lowercase) self.assertIsNotNone(lowercase) self.assertEqual(len(loading_info['missing_keys']) , 0) model.to(lowercase) a__: Any = model(**self.dummy_input)[0] assert hidden_states is not None, "Make sure output is not None" def lowerCamelCase_ ( self) -> List[Any]: '''simple docstring''' a__ , a__: Tuple = self.prepare_init_args_and_inputs_for_common() a__: Any = self.model_class(**lowercase) a__: str = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic a__: Tuple = [*signature.parameters.keys()] a__: List[Any] = ['hidden_states', 'timestep'] self.assertListEqual(arg_names[:2] , lowercase) def lowerCamelCase_ ( self) -> List[Any]: '''simple docstring''' a__: str = PriorTransformer.from_pretrained('hf-internal-testing/prior-dummy') a__: str = model.to(lowercase) if hasattr(lowercase , 'set_default_attn_processor'): model.set_default_attn_processor() a__: Dict = self.get_dummy_seed_input() with torch.no_grad(): a__: str = model(**lowercase)[0] a__: str = output[0, :5].flatten().cpu() print(lowercase) # Since the VAE Gaussian prior's generator is seeded on the appropriate device, # the expected output slices are not the same for CPU and GPU. 
a__: Any = torch.tensor([-1.3436, -0.2870, 0.7538, 0.4368, -0.0239]) self.assertTrue(torch_all_close(lowercase , lowercase , rtol=1e-2)) @slow class __snake_case ( unittest.TestCase ): def lowerCamelCase_ ( self , lowercase=1 , lowercase=7_68 , lowercase=77 , lowercase=0) -> int: '''simple docstring''' torch.manual_seed(lowercase) a__: Union[str, Any] = batch_size a__: List[str] = embedding_dim a__: str = num_embeddings a__: Tuple = torch.randn((batch_size, embedding_dim)).to(lowercase) a__: List[str] = torch.randn((batch_size, embedding_dim)).to(lowercase) a__: str = torch.randn((batch_size, num_embeddings, embedding_dim)).to(lowercase) return { "hidden_states": hidden_states, "timestep": 2, "proj_embedding": proj_embedding, "encoder_hidden_states": encoder_hidden_states, } def lowerCamelCase_ ( self) -> List[Any]: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() @parameterized.expand( [ # fmt: off [13, [-0.5861, 0.1283, -0.0931, 0.0882, 0.4476, 0.1329, -0.0498, 0.0640]], [37, [-0.4913, 0.0110, -0.0483, 0.0541, 0.4954, -0.0170, 0.0354, 0.1651]], # fmt: on ]) def lowerCamelCase_ ( self , lowercase , lowercase) -> str: '''simple docstring''' a__: Tuple = PriorTransformer.from_pretrained('kandinsky-community/kandinsky-2-1-prior' , subfolder='prior') model.to(lowercase) a__: Optional[Any] = self.get_dummy_seed_input(seed=lowercase) with torch.no_grad(): a__: Optional[int] = model(**lowercase)[0] assert list(sample.shape) == [1, 7_68] a__: List[str] = sample[0, :8].flatten().cpu() print(lowercase) a__: Union[str, Any] = torch.tensor(lowercase) assert torch_all_close(lowercase , lowercase , atol=1e-3)
203
0
import typing
from collections.abc import Iterable

import numpy as np

# Type aliases: a vector is any iterable of numbers (or a numpy array); the
# distance is a plain or numpy scalar.
Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007


def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut:
    """Return the Euclidean (L2) distance between two equal-length vectors using numpy."""
    return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2))


def euclidean_distance_no_np(vector_1: Vector, vector_2: Vector) -> VectorOut:
    """Pure-Python Euclidean distance, kept as a baseline for the benchmark below."""
    return sum((v1 - v2) ** 2 for v1, v2 in zip(vector_1, vector_2)) ** (1 / 2)


if __name__ == "__main__":

    def benchmark() -> None:
        """Compare the numpy and pure-Python implementations with timeit."""
        from timeit import timeit

        print("Without Numpy")
        print(
            timeit(
                "euclidean_distance_no_np([1, 2, 3], [4, 5, 6])",
                number=10000,
                globals=globals(),
            )
        )
        print("With Numpy")
        print(
            timeit(
                "euclidean_distance([1, 2, 3], [4, 5, 6])",
                number=10000,
                globals=globals(),
            )
        )

    benchmark()
299
# Lazy-import package init for the LayoutLMv2 model family: heavy submodules are
# only imported on first attribute access via transformers' _LazyModule.
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


# Base structure: module name -> public names it exports. Optional backends are
# appended below only when their dependency is installed.
_import_structure = {
    "configuration_layoutlmv2": ["LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "LayoutLMv2Config"],
    "processing_layoutlmv2": ["LayoutLMv2Processor"],
    "tokenization_layoutlmv2": ["LayoutLMv2Tokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutlmv2_fast"] = ["LayoutLMv2TokenizerFast"]

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_layoutlmv2"] = ["LayoutLMv2FeatureExtractor"]
    _import_structure["image_processing_layoutlmv2"] = ["LayoutLMv2ImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_layoutlmv2"] = [
        "LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LayoutLMv2ForQuestionAnswering",
        "LayoutLMv2ForSequenceClassification",
        "LayoutLMv2ForTokenClassification",
        "LayoutLMv2Layer",
        "LayoutLMv2Model",
        "LayoutLMv2PreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static-analysis-only imports; module/class spellings mirror the string keys
    # above, which are the runtime source of truth for the lazy loader.
    from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config
    from .processing_layoutlmv2 import LayoutLMv2Processor
    from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor
        from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv2 import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv2ForQuestionAnswering,
            LayoutLMv2ForSequenceClassification,
            LayoutLMv2ForTokenClassification,
            LayoutLMv2Layer,
            LayoutLMv2Model,
            LayoutLMv2PreTrainedModel,
        )

else:
    import sys

    # Install the lazy module in sys.modules so attribute access triggers imports.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
299
1
"""simple docstring""" import argparse import json import os import numpy as np import PIL import requests import tensorflow.keras.applications.efficientnet as efficientnet import torch from huggingface_hub import hf_hub_download from PIL import Image from tensorflow.keras.preprocessing import image from transformers import ( EfficientNetConfig, EfficientNetForImageClassification, EfficientNetImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() lowerCamelCase_ : Tuple = logging.get_logger(__name__) lowerCamelCase_ : Any = { """b0""": efficientnet.EfficientNetBa, """b1""": efficientnet.EfficientNetBa, """b2""": efficientnet.EfficientNetBa, """b3""": efficientnet.EfficientNetBa, """b4""": efficientnet.EfficientNetBa, """b5""": efficientnet.EfficientNetBa, """b6""": efficientnet.EfficientNetBa, """b7""": efficientnet.EfficientNetBa, } lowerCamelCase_ : Optional[int] = { """b0""": { """hidden_dim""": 1_2_8_0, """width_coef""": 1.0, """depth_coef""": 1.0, """image_size""": 2_2_4, """dropout_rate""": 0.2, """dw_padding""": [], }, """b1""": { """hidden_dim""": 1_2_8_0, """width_coef""": 1.0, """depth_coef""": 1.1, """image_size""": 2_4_0, """dropout_rate""": 0.2, """dw_padding""": [1_6], }, """b2""": { """hidden_dim""": 1_4_0_8, """width_coef""": 1.1, """depth_coef""": 1.2, """image_size""": 2_6_0, """dropout_rate""": 0.3, """dw_padding""": [5, 8, 1_6], }, """b3""": { """hidden_dim""": 1_5_3_6, """width_coef""": 1.2, """depth_coef""": 1.4, """image_size""": 3_0_0, """dropout_rate""": 0.3, """dw_padding""": [5, 1_8], }, """b4""": { """hidden_dim""": 1_7_9_2, """width_coef""": 1.4, """depth_coef""": 1.8, """image_size""": 3_8_0, """dropout_rate""": 0.4, """dw_padding""": [6], }, """b5""": { """hidden_dim""": 2_0_4_8, """width_coef""": 1.6, """depth_coef""": 2.2, """image_size""": 4_5_6, """dropout_rate""": 0.4, """dw_padding""": [1_3, 2_7], }, """b6""": { """hidden_dim""": 2_3_0_4, """width_coef""": 1.8, """depth_coef""": 2.6, 
"""image_size""": 5_2_8, """dropout_rate""": 0.5, """dw_padding""": [3_1], }, """b7""": { """hidden_dim""": 2_5_6_0, """width_coef""": 2.0, """depth_coef""": 3.1, """image_size""": 6_0_0, """dropout_rate""": 0.5, """dw_padding""": [1_8], }, } def _A ( lowercase ): """simple docstring""" a =EfficientNetConfig() a =CONFIG_MAP[model_name]['''hidden_dim'''] a =CONFIG_MAP[model_name]['''width_coef'''] a =CONFIG_MAP[model_name]['''depth_coef'''] a =CONFIG_MAP[model_name]['''image_size'''] a =CONFIG_MAP[model_name]['''dropout_rate'''] a =CONFIG_MAP[model_name]['''dw_padding'''] a ='''huggingface/label-files''' a ='''imagenet-1k-id2label.json''' a =10_00 a =json.load(open(hf_hub_download(lowercase , lowercase , repo_type='''dataset''' ) , '''r''' ) ) a ={int(lowercase ): v for k, v in idalabel.items()} a =idalabel a ={v: k for k, v in idalabel.items()} return config def _A ( ): """simple docstring""" a ='''http://images.cocodataset.org/val2017/000000039769.jpg''' a =Image.open(requests.get(lowercase , stream=lowercase ).raw ) return im def _A ( lowercase ): """simple docstring""" a =CONFIG_MAP[model_name]['''image_size'''] a =EfficientNetImageProcessor( size={'''height''': size, '''width''': size} , image_mean=[0.485, 0.456, 0.406] , image_std=[0.47853944, 0.4732864, 0.47434163] , do_center_crop=lowercase , ) return preprocessor def _A ( lowercase ): """simple docstring""" a =[v.split('''_''' )[0].split('''block''' )[1] for v in original_param_names if v.startswith('''block''' )] a =sorted(set(lowercase ) ) a =len(lowercase ) a ={b: str(lowercase ) for b, i in zip(lowercase , range(lowercase ) )} a =[] rename_keys.append(('''stem_conv/kernel:0''', '''embeddings.convolution.weight''') ) rename_keys.append(('''stem_bn/gamma:0''', '''embeddings.batchnorm.weight''') ) rename_keys.append(('''stem_bn/beta:0''', '''embeddings.batchnorm.bias''') ) rename_keys.append(('''stem_bn/moving_mean:0''', '''embeddings.batchnorm.running_mean''') ) 
rename_keys.append(('''stem_bn/moving_variance:0''', '''embeddings.batchnorm.running_var''') ) for b in block_names: a =block_name_mapping[b] rename_keys.append((f'''block{b}_expand_conv/kernel:0''', f'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') ) rename_keys.append((f'''block{b}_expand_bn/gamma:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') ) rename_keys.append((f'''block{b}_expand_bn/beta:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') ) rename_keys.append( (f'''block{b}_expand_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') ) rename_keys.append( (f'''block{b}_expand_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') ) rename_keys.append( (f'''block{b}_dwconv/depthwise_kernel:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') ) rename_keys.append((f'''block{b}_bn/gamma:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') ) rename_keys.append((f'''block{b}_bn/beta:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') ) rename_keys.append( (f'''block{b}_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') ) rename_keys.append( (f'''block{b}_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') ) rename_keys.append((f'''block{b}_se_reduce/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') ) rename_keys.append((f'''block{b}_se_reduce/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') ) rename_keys.append((f'''block{b}_se_expand/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') ) rename_keys.append((f'''block{b}_se_expand/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') ) rename_keys.append( (f'''block{b}_project_conv/kernel:0''', f'''encoder.blocks.{hf_b}.projection.project_conv.weight''') ) 
rename_keys.append((f'''block{b}_project_bn/gamma:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.weight''') ) rename_keys.append((f'''block{b}_project_bn/beta:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.bias''') ) rename_keys.append( (f'''block{b}_project_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') ) rename_keys.append( (f'''block{b}_project_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') ) rename_keys.append(('''top_conv/kernel:0''', '''encoder.top_conv.weight''') ) rename_keys.append(('''top_bn/gamma:0''', '''encoder.top_bn.weight''') ) rename_keys.append(('''top_bn/beta:0''', '''encoder.top_bn.bias''') ) rename_keys.append(('''top_bn/moving_mean:0''', '''encoder.top_bn.running_mean''') ) rename_keys.append(('''top_bn/moving_variance:0''', '''encoder.top_bn.running_var''') ) a ={} for item in rename_keys: if item[0] in original_param_names: a ='''efficientnet.''' + item[1] a ='''classifier.weight''' a ='''classifier.bias''' return key_mapping def _A ( lowercase , lowercase , lowercase ): """simple docstring""" for key, value in tf_params.items(): if "normalization" in key: continue a =key_mapping[key] if "_conv" in key and "kernel" in key: a =torch.from_numpy(lowercase ).permute(3 , 2 , 0 , 1 ) elif "depthwise_kernel" in key: a =torch.from_numpy(lowercase ).permute(2 , 3 , 0 , 1 ) elif "kernel" in key: a =torch.from_numpy(np.transpose(lowercase ) ) else: a =torch.from_numpy(lowercase ) # Replace HF parameters with original TF model parameters assert hf_params[hf_key].shape == new_hf_value.shape hf_params[hf_key].copy_(lowercase ) @torch.no_grad() def _A ( lowercase , lowercase , lowercase , lowercase ): """simple docstring""" a =model_classes[model_name]( include_top=lowercase , weights='''imagenet''' , input_tensor=lowercase , input_shape=lowercase , pooling=lowercase , classes=10_00 , classifier_activation='''softmax''' , ) a =original_model.trainable_variables a 
=original_model.non_trainable_variables a ={param.name: param.numpy() for param in tf_params} for param in tf_non_train_params: a =param.numpy() a =list(tf_params.keys() ) # Load HuggingFace model a =get_efficientnet_config(lowercase ) a =EfficientNetForImageClassification(lowercase ).eval() a =hf_model.state_dict() # Create src-to-dst parameter name mapping dictionary print('''Converting parameters...''' ) a =rename_keys(lowercase ) replace_params(lowercase , lowercase , lowercase ) # Initialize preprocessor and preprocess input image a =convert_image_processor(lowercase ) a =preprocessor(images=prepare_img() , return_tensors='''pt''' ) # HF model inference hf_model.eval() with torch.no_grad(): a =hf_model(**lowercase ) a =outputs.logits.detach().numpy() # Original model inference a =False a =CONFIG_MAP[model_name]['''image_size'''] a =prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST ) a =image.img_to_array(lowercase ) a =np.expand_dims(lowercase , axis=0 ) a =original_model.predict(lowercase ) # Check whether original and HF model outputs match -> np.allclose assert np.allclose(lowercase , lowercase , atol=1E-3 ), "The predicted logits are not the same." 
print('''Model outputs match!''' ) if save_model: # Create folder to save model if not os.path.isdir(lowercase ): os.mkdir(lowercase ) # Save converted model and image processor hf_model.save_pretrained(lowercase ) preprocessor.save_pretrained(lowercase ) if push_to_hub: # Push model and image processor to hub print(f'''Pushing converted {model_name} to the hub...''' ) a =f'''efficientnet-{model_name}''' preprocessor.push_to_hub(lowercase ) hf_model.push_to_hub(lowercase ) if __name__ == "__main__": lowerCamelCase_ : Tuple = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""b0""", type=str, help="""Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].""", ) parser.add_argument( """--pytorch_dump_folder_path""", default="""hf_model""", type=str, help="""Path to the output PyTorch model directory.""", ) parser.add_argument("""--save_model""", action="""store_true""", help="""Save model to local""") parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""") lowerCamelCase_ : Optional[Any] = parser.parse_args() convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
215
"""simple docstring""" def _A ( lowercase = 2_00_00_00 ): """simple docstring""" a =[0 for i in range(n + 1 )] a =1 a =1 for i in range(2 , int(n**0.5 ) + 1 ): if primality_list[i] == 0: for j in range(i * i , n + 1 , lowercase ): a =1 a =0 for i in range(lowercase ): if primality_list[i] == 0: sum_of_primes += i return sum_of_primes if __name__ == "__main__": print(F'{solution() = }')
215
1
"""End-to-end tests for the legacy seq2seq run_eval / run_eval_search scripts."""

import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch

from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search

from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS


logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()


def _dump_articles(path: Path, articles: list):
    """Write *articles* to *path*, one article per line."""
    content = "\n".join(articles)
    Path(path).open("w").writelines(content)


T5_TINY = "patrickvonplaten/t5-tiny-random"
BART_TINY = "sshleifer/bart-tiny-random"
MBART_TINY = "sshleifer/tiny-mbart"

stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL)  # remove noisy download output from tracebacks


class TestTheRest(TestCasePlus):
    def run_eval_tester(self, model):
        """Run run_eval's run_generate() end-to-end for *model* on a one-line input."""
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / "utest_input.source"
        output_file_name = input_file_name.parent / "utest_output.txt"
        assert not output_file_name.exists()
        articles = [" New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County."]
        _dump_articles(input_file_name, articles)
        score_path = str(Path(self.get_auto_remove_tmp_dir()) / "scores.json")
        # T5 tiny is a translation model; everything else here is summarization.
        task = "translation_en_to_de" if model == T5_TINY else "summarization"
        testargs = f"""
            run_eval_search.py
            {model}
            {input_file_name}
            {output_file_name}
            --score_path {score_path}
            --task {task}
            --num_beams 2
            --length_penalty 2.0
        """.split()

        with patch.object(sys, "argv", testargs):
            run_generate()
            assert Path(output_file_name).exists()
            # os.remove(Path(output_file_name))

    # Test one model quickly (not @slow) to catch simple problems; extensive
    # multi-model coverage runs as @slow below.
    def test_run_eval(self):
        self.run_eval_tester(T5_TINY)

    # Any extra models should go into this list — can be slow.
    @parameterized.expand([BART_TINY, MBART_TINY])
    @slow
    def test_run_eval_slow(self, model):
        self.run_eval_tester(model)

    # Testing with 2 models to validate: 1. translation (t5) 2. summarization (mbart)
    @parameterized.expand([T5_TINY, MBART_TINY])
    @slow
    def test_run_eval_search(self, model):
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / "utest_input.source"
        output_file_name = input_file_name.parent / "utest_output.txt"
        assert not output_file_name.exists()

        text = {
            "en": ["Machine learning is great, isn't it?", "I like to eat bananas", "Tomorrow is another great day!"],
            "de": [
                "Maschinelles Lernen ist großartig, oder?",
                "Ich esse gerne Bananen",
                "Morgen ist wieder ein toller Tag!",
            ],
        }

        tmp_dir = Path(self.get_auto_remove_tmp_dir())
        score_path = str(tmp_dir / "scores.json")
        reference_path = str(tmp_dir / "val.target")
        _dump_articles(input_file_name, text["en"])
        _dump_articles(reference_path, text["de"])
        task = "translation_en_to_de" if model == T5_TINY else "summarization"
        testargs = f"""
            run_eval_search.py
            {model}
            {str(input_file_name)}
            {str(output_file_name)}
            --score_path {score_path}
            --reference_path {reference_path}
            --task {task}
        """.split()
        testargs.extend(["--search", "num_beams=1:2 length_penalty=0.9:1.0"])

        with patch.object(sys, "argv", testargs):
            with CaptureStdout() as cs:
                run_search()
            # The search report should show the grid header, the model name, and
            # the best-score line; "Info" log noise must be suppressed.
            expected_strings = [" num_beams | length_penalty", model, "Best score args"]
            un_expected_strings = ["Info"]
            if "translation" in task:
                expected_strings.append("bleu")
            else:
                expected_strings.extend(ROUGE_KEYS)
            for w in expected_strings:
                assert w in cs.out
            for w in un_expected_strings:
                assert w not in cs.out
            assert Path(output_file_name).exists()
            os.remove(Path(output_file_name))
85
from dataclasses import dataclass
from typing import List, Optional, Union

import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel

from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
    BaseOutput,
    is_accelerate_available,
    logging,
    randn_tensor,
    replace_example_docstring,
)
from .renderer import ShapERenderer


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
    Examples:
    ```py
    >>> from PIL import Image
    >>> import torch
    >>> from diffusers import DiffusionPipeline
    >>> from diffusers.utils import export_to_gif, load_image

    >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    >>> repo = "openai/shap-e-img2img"
    >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)
    >>> pipe = pipe.to(device)

    >>> guidance_scale = 3.0
    >>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"
    >>> image = load_image(image_url).convert("RGB")

    >>> images = pipe(
    ...     image,
    ...     guidance_scale=guidance_scale,
    ...     num_inference_steps=64,
    ...     frame_size=256,
    ... ).images

    >>> gif_path = export_to_gif(images[0], "corgi_3d.gif")
    ```
"""


@dataclass
class ShapEPipelineOutput(BaseOutput):
    """Output class for the Shap-E pipelines.

    Attributes:
        images: rendered views of the generated 3D asset, either PIL images
            or a numpy array depending on ``output_type``.
    """

    images: Union[PIL.Image.Image, np.ndarray]


class ShapEImg2ImgPipeline(DiffusionPipeline):
    """Pipeline that generates the latent representation of a 3D asset from a
    reference image and renders it with NeRF, using the Shap-E model."""

    def __init__(
        self,
        prior: PriorTransformer,
        image_encoder: CLIPVisionModel,
        image_processor: CLIPImageProcessor,
        scheduler: HeunDiscreteScheduler,
        renderer: ShapERenderer,
    ) -> None:
        super().__init__()
        self.register_modules(
            prior=prior,
            image_encoder=image_encoder,
            image_processor=image_processor,
            scheduler=scheduler,
            renderer=renderer,
        )

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        """Sample (or validate user-provided) initial latents and scale them by
        the scheduler's initial noise sigma."""
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents

    def enable_sequential_cpu_offload(self, gpu_id=0):
        """Offload submodels to CPU, loading each to GPU only when its forward
        pass runs, to reduce peak memory usage."""
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [self.image_encoder, self.prior]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    @property
    def _execution_device(self):
        """Device on which the pipeline actually executes; honours accelerate
        hooks placed on the image encoder, if any."""
        if self.device != torch.device("meta") or not hasattr(self.image_encoder, "_hf_hook"):
            return self.device
        for module in self.image_encoder.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    def _encode_image(self, image, device, num_images_per_prompt, do_classifier_free_guidance):
        """Encode the conditioning image(s) with CLIP and, for classifier-free
        guidance, prepend zero embeddings as the unconditional branch."""
        if isinstance(image, List) and isinstance(image[0], torch.Tensor):
            image = torch.cat(image, axis=0) if image[0].ndim == 4 else torch.stack(image, axis=0)

        if not isinstance(image, torch.Tensor):
            image = self.image_processor(image, return_tensors="pt").pixel_values[0].unsqueeze(0)

        image = image.to(dtype=self.image_encoder.dtype, device=device)

        image_embeds = self.image_encoder(image)["last_hidden_state"]
        image_embeds = image_embeds[:, 1:, :].contiguous()  # batch_size, dim, 256

        image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            negative_image_embeds = torch.zeros_like(image_embeds)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            image_embeds = torch.cat([negative_image_embeds, image_embeds])

        return image_embeds

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image,
        num_images_per_prompt: int = 1,
        num_inference_steps: int = 25,
        generator=None,
        latents=None,
        guidance_scale: float = 4.0,
        frame_size: int = 64,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        """Generate 3D latents conditioned on *image* and render them.

        Raises:
            ValueError: if *image* is not a PIL image, tensor, or list thereof,
                or if *output_type* is not one of "latent", "np", "pil".
        """
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        elif isinstance(image, list) and isinstance(image[0], (torch.Tensor, PIL.Image.Image)):
            batch_size = len(image)
        else:
            raise ValueError(
                f"`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(image)}"
            )

        device = self._execution_device

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0
        image_embeds = self._encode_image(image, device, num_images_per_prompt, do_classifier_free_guidance)

        # prior
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        num_embeddings = self.prior.config.num_embeddings
        embedding_dim = self.prior.config.embedding_dim

        latents = self.prepare_latents(
            (batch_size, num_embeddings * embedding_dim),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
        latents = latents.reshape(latents.shape[0], num_embeddings, embedding_dim)

        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            scaled_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            noise_pred = self.prior(
                scaled_model_input,
                timestep=t,
                proj_embedding=image_embeds,
            ).predicted_image_embedding

            # remove the variance
            noise_pred, _ = noise_pred.split(
                scaled_model_input.shape[2], dim=2
            )  # batch_size, num_embeddings, embedding_dim

            # FIX: the original tested `do_classifier_free_guidance is not None`,
            # which is always true for a bool and would mis-split the batch when
            # guidance is disabled.
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)

            latents = self.scheduler.step(
                noise_pred,
                timestep=t,
                sample=latents,
            ).prev_sample

        if output_type == "latent":
            return ShapEPipelineOutput(images=latents)

        images = []
        for i, latent in enumerate(latents):
            # stray debug print() removed here
            image = self.renderer.decode(
                latent[None, :],
                device,
                size=frame_size,
                ray_batch_size=4096,
                n_coarse_samples=64,
                n_fine_samples=128,
            )
            images.append(image)

        images = torch.stack(images)

        if output_type not in ["np", "pil"]:
            raise ValueError(f"Only the output types `pil` and `np` are supported not output_type={output_type}")

        images = images.cpu().numpy()

        if output_type == "pil":
            images = [self.numpy_to_pil(image) for image in images]

        # Offload last model to CPU
        if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
            self.final_offload_hook.offload()

        if not return_dict:
            return (images,)

        return ShapEPipelineOutput(images=images)
29
0
"""Tests for the ONNX Stable Diffusion x4 upscale pipeline."""
import random
import unittest

import numpy as np
import torch
from diffusers import (
    DPMSolverMultistepScheduler,
    EulerAncestralDiscreteScheduler,
    EulerDiscreteScheduler,
    LMSDiscreteScheduler,
    OnnxStableDiffusionUpscalePipeline,
    PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
    is_onnx_available,
    load_image,
    nightly,
    require_onnxruntime,
    require_torch_gpu,
)

from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin


if is_onnx_available():
    import onnxruntime as ort


class OnnxStableDiffusionUpscalePipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    # the tiny upscaler checkpoint used by every fast test
    hub_checkpoint = "ssube/stable-diffusion-x4-upscaler-onnx"

    def get_dummy_inputs(self, seed=0):
        """Deterministic kwargs for a 128x128 upscale call."""
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_pipeline_default(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        # started as 128, should now be 512
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223]
        )
        assert np.abs(image_slice - expected_slice).max() < 1e-1

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6898892, 0.59240556, 0.52499527, 0.58866215, 0.52258235, 0.52572715, 0.62414473, 0.6174387, 0.6214964]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.7659278, 0.76437664, 0.75579107, 0.7691116, 0.77666986, 0.7727672, 0.7758664, 0.7812226, 0.76942515]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.77424496, 0.773601, 0.7645288, 0.7769598, 0.7772739, 0.7738688, 0.78187233, 0.77879584, 0.767043]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1


@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((128, 128))

        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx",
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4883, 0.4947, 0.4980, 0.4975, 0.4982, 0.4980, 0.5000, 0.5006, 0.4972])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((128, 128))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx", subfolder="scheduler"
        )
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx",
            scheduler=lms_scheduler,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.50173753, 0.50223356, 0.502039, 0.50233036, 0.5023725, 0.5022601, 0.5018758, 0.50234085, 0.50241566]
        )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
365
import argparse
from typing import List

import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset

# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType


########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
#   - single CPU or single GPU
#   - multi GPUS (using PyTorch distributed mode)
#   - (multi) TPUs
#   - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################

MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_fold_dataloaders(
    accelerator: Accelerator, dataset: DatasetDict, train_idxs: List[int], valid_idxs: List[int], batch_size: int = 16
):
    """Build train/validation/test dataloaders for one cross-validation fold.

    Args:
        accelerator: the `Accelerator` driving distributed setup.
        dataset: the full GLUE MRPC `DatasetDict`.
        train_idxs: row indices of `dataset["train"]` used for training.
        valid_idxs: row indices of `dataset["train"]` used for validation.
        batch_size: per-device training batch size.

    Returns:
        (train_dataloader, eval_dataloader, test_dataloader)
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = DatasetDict(
        {
            "train": dataset["train"].select(train_idxs),
            "validation": dataset["train"].select(valid_idxs),
            "test": dataset["validation"],
        }
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )
    test_dataloader = DataLoader(
        tokenized_datasets["test"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader, test_dataloader


def training_function(config, args):
    """Train one model per fold and report metrics averaged over all folds."""
    # New Code #
    test_predictions = []
    # Download the dataset
    datasets = load_dataset("glue", "mrpc")
    # Create our splits
    kfold = StratifiedKFold(n_splits=int(args.num_folds))
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)

    # New Code #
    # Create our folds:
    folds = kfold.split(np.zeros(datasets["train"].num_rows), datasets["train"]["label"])
    test_references = []
    # Iterate over them
    for i, (train_idxs, valid_idxs) in enumerate(folds):
        train_dataloader, eval_dataloader, test_dataloader = get_fold_dataloaders(
            accelerator,
            datasets,
            train_idxs,
            valid_idxs,
            batch_size,
        )
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions,
                    references=references,
                )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

        # New Code #
        # We also run predictions on the test set at the very end
        fold_predictions = []
        for step, batch in enumerate(test_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            fold_predictions.append(predictions.cpu())
            if i == 0:
                # We need all of the test predictions
                test_references.append(references.cpu())

        # Use accelerator.print to print only on the main process.
        test_predictions.append(torch.cat(fold_predictions, dim=0))
        # We now need to release all our memory and get rid of the current model, optimizer, etc
        accelerator.free_memory()

    # New Code #
    # Finally we check the accuracy of our folded results:
    test_references = torch.cat(test_references, dim=0)
    preds = torch.stack(test_predictions, dim=0).sum(dim=0).div(int(args.num_folds)).argmax(dim=-1)
    test_metric = metric.compute(predictions=preds, references=test_references)
    accelerator.print("Average test metrics from all folds:", test_metric)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    # New Code #
    parser.add_argument("--num_folds", type=int, default=3, help="The number of splits to perform across the dataset")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
299
0
'''simple docstring''' import os import shutil import sys import tempfile import unittest from pathlib import Path import pytest import transformers from transformers import ( BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, AutoTokenizer, BertConfig, BertTokenizer, BertTokenizerFast, CTRLTokenizer, GPTaTokenizer, GPTaTokenizerFast, PreTrainedTokenizerFast, RobertaTokenizer, RobertaTokenizerFast, is_tokenizers_available, ) from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig from transformers.models.auto.tokenization_auto import ( TOKENIZER_MAPPING, get_tokenizer_config, tokenizer_class_from_name, ) from transformers.models.roberta.configuration_roberta import RobertaConfig from transformers.testing_utils import ( DUMMY_DIFF_TOKENIZER_IDENTIFIER, DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, RequestCounter, require_tokenizers, slow, ) sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils''')) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_tokenization import CustomTokenizer # noqa E402 if is_tokenizers_available(): from test_module.custom_tokenization_fast import CustomTokenizerFast class snake_case ( unittest.TestCase ): """simple docstring""" def _lowerCamelCase ( self : Any ): __UpperCamelCase = 0 @slow def _lowerCamelCase ( self : Dict ): for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x): __UpperCamelCase = AutoTokenizer.from_pretrained(__A ) self.assertIsNotNone(__A ) self.assertIsInstance(__A , (BertTokenizer, BertTokenizerFast) ) self.assertGreater(len(__A ) , 0 ) for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys(): __UpperCamelCase = AutoTokenizer.from_pretrained(__A ) self.assertIsNotNone(__A ) self.assertIsInstance(__A , (GPTaTokenizer, GPTaTokenizerFast) ) self.assertGreater(len(__A ) , 0 ) def _lowerCamelCase ( self : Dict ): __UpperCamelCase = AutoTokenizer.from_pretrained(__A 
) self.assertIsInstance(__A , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(tokenizer.vocab_size , 1_2 ) def _lowerCamelCase ( self : Union[str, Any] ): __UpperCamelCase = AutoTokenizer.from_pretrained(__A ) self.assertIsInstance(__A , (RobertaTokenizer, RobertaTokenizerFast) ) self.assertEqual(tokenizer.vocab_size , 2_0 ) def _lowerCamelCase ( self : Dict ): __UpperCamelCase = AutoConfig.from_pretrained(__A ) self.assertIsInstance(__A , __A ) # Check that tokenizer_type ≠ model_type __UpperCamelCase = AutoTokenizer.from_pretrained(__A , config=__A ) self.assertIsInstance(__A , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(tokenizer.vocab_size , 1_2 ) def _lowerCamelCase ( self : Optional[Any] ): with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy('./tests/fixtures/vocab.txt' , os.path.join(__A , 'vocab.txt' ) ) __UpperCamelCase = AutoTokenizer.from_pretrained(__A , tokenizer_type='bert' , use_fast=__A ) self.assertIsInstance(__A , __A ) with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy('./tests/fixtures/vocab.json' , os.path.join(__A , 'vocab.json' ) ) shutil.copy('./tests/fixtures/merges.txt' , os.path.join(__A , 'merges.txt' ) ) __UpperCamelCase = AutoTokenizer.from_pretrained(__A , tokenizer_type='gpt2' , use_fast=__A ) self.assertIsInstance(__A , __A ) @require_tokenizers def _lowerCamelCase ( self : Optional[int] ): with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy('./tests/fixtures/vocab.txt' , os.path.join(__A , 'vocab.txt' ) ) __UpperCamelCase = AutoTokenizer.from_pretrained(__A , tokenizer_type='bert' ) self.assertIsInstance(__A , __A ) with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy('./tests/fixtures/vocab.json' , os.path.join(__A , 'vocab.json' ) ) shutil.copy('./tests/fixtures/merges.txt' , os.path.join(__A , 'merges.txt' ) ) __UpperCamelCase = AutoTokenizer.from_pretrained(__A , tokenizer_type='gpt2' ) self.assertIsInstance(__A , __A ) def _lowerCamelCase ( self : int ): with pytest.raises(__A ): 
AutoTokenizer.from_pretrained('./' , tokenizer_type='xxx' ) @require_tokenizers def _lowerCamelCase ( self : Optional[Any] ): for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]: __UpperCamelCase = tokenizer_class.from_pretrained('wietsedv/bert-base-dutch-cased' ) self.assertIsInstance(__A , (BertTokenizer, BertTokenizerFast) ) if isinstance(__A , __A ): self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , __A ) else: self.assertEqual(tokenizer.do_lower_case , __A ) self.assertEqual(tokenizer.model_max_length , 5_1_2 ) @require_tokenizers def _lowerCamelCase ( self : List[str] ): for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]: with self.assertRaisesRegex( __A , 'julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier' , ): __UpperCamelCase = tokenizer_class.from_pretrained('julien-c/herlolip-not-exists' ) def _lowerCamelCase ( self : List[Any] ): # tests: https://github.com/huggingface/transformers/pull/13251 # 1. models with `-`, e.g. xlm-roberta -> xlm_roberta # 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai __UpperCamelCase = TOKENIZER_MAPPING.values() __UpperCamelCase = [] for slow_tok, fast_tok in tokenizers: if slow_tok is not None: tokenizer_names.append(slow_tok.__name__ ) if fast_tok is not None: tokenizer_names.append(fast_tok.__name__ ) for tokenizer_name in tokenizer_names: # must find the right class tokenizer_class_from_name(__A ) @require_tokenizers def _lowerCamelCase ( self : Optional[int] ): self.assertIsInstance(AutoTokenizer.from_pretrained('bert-base-cased' , use_fast=__A ) , __A ) self.assertIsInstance(AutoTokenizer.from_pretrained('bert-base-cased' ) , __A ) @require_tokenizers def _lowerCamelCase ( self : Optional[int] ): __UpperCamelCase = AutoTokenizer.from_pretrained('distilbert-base-uncased' , do_lower_case=__A ) __UpperCamelCase = 'Hello, world. How are you?' 
__UpperCamelCase = tokenizer.tokenize(__A ) self.assertEqual('[UNK]' , tokens[0] ) __UpperCamelCase = AutoTokenizer.from_pretrained('microsoft/mpnet-base' , do_lower_case=__A ) __UpperCamelCase = tokenizer.tokenize(__A ) self.assertEqual('[UNK]' , tokens[0] ) @require_tokenizers def _lowerCamelCase ( self : List[Any] ): __UpperCamelCase = AutoTokenizer.from_pretrained('robot-test/dummy-tokenizer-fast-with-model-config' ) self.assertEqual(type(__A ) , __A ) self.assertEqual(tokenizer.model_max_length , 5_1_2 ) self.assertEqual(tokenizer.vocab_size , 3_0_0_0_0 ) self.assertEqual(tokenizer.unk_token , '[UNK]' ) self.assertEqual(tokenizer.padding_side , 'right' ) self.assertEqual(tokenizer.truncation_side , 'right' ) def _lowerCamelCase ( self : int ): __UpperCamelCase = AutoTokenizer.from_pretrained(__A ) self.assertIsInstance(__A , (BertTokenizer, BertTokenizerFast) ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(__A ) __UpperCamelCase = AutoTokenizer.from_pretrained(__A ) self.assertIsInstance(__A , tokenizer.__class__ ) self.assertEqual(tokenizera.vocab_size , 1_2 ) def _lowerCamelCase ( self : int ): __UpperCamelCase = AutoTokenizer.from_pretrained('ctrl' ) # There is no fast CTRL so this always gives us a slow tokenizer. self.assertIsInstance(__A , __A ) def _lowerCamelCase ( self : Tuple ): # Check we can load the tokenizer config of an online model. __UpperCamelCase = get_tokenizer_config('bert-base-cased' ) __UpperCamelCase = config.pop('_commit_hash' , __A ) # If we ever update bert-base-cased tokenizer config, this dict here will need to be updated. self.assertEqual(__A , {'do_lower_case': False} ) # This model does not have a tokenizer_config so we get back an empty dict. __UpperCamelCase = get_tokenizer_config(__A ) self.assertDictEqual(__A , {} ) # A tokenizer saved with `save_pretrained` always creates a tokenizer config. 
__UpperCamelCase = AutoTokenizer.from_pretrained(__A ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(__A ) __UpperCamelCase = get_tokenizer_config(__A ) # Check the class of the tokenizer was properly saved (note that it always saves the slow class). self.assertEqual(config['tokenizer_class'] , 'BertTokenizer' ) def _lowerCamelCase ( self : List[str] ): try: AutoConfig.register('custom' , __A ) AutoTokenizer.register(__A , slow_tokenizer_class=__A ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(__A ): AutoTokenizer.register(__A , slow_tokenizer_class=__A ) __UpperCamelCase = CustomTokenizer.from_pretrained(__A ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(__A ) __UpperCamelCase = AutoTokenizer.from_pretrained(__A ) self.assertIsInstance(__A , __A ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] @require_tokenizers def _lowerCamelCase ( self : Optional[int] ): try: AutoConfig.register('custom' , __A ) # Can register in two steps AutoTokenizer.register(__A , slow_tokenizer_class=__A ) self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) ) AutoTokenizer.register(__A , fast_tokenizer_class=__A ) self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) ) del TOKENIZER_MAPPING._extra_content[CustomConfig] # Can register in one step AutoTokenizer.register( __A , slow_tokenizer_class=__A , fast_tokenizer_class=__A ) self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(__A ): AutoTokenizer.register(__A , fast_tokenizer_class=__A ) # We pass through a bert tokenizer fast cause there is no converter 
slow to fast for our new toknizer # and that model does not have a tokenizer.json with tempfile.TemporaryDirectory() as tmp_dir: __UpperCamelCase = BertTokenizerFast.from_pretrained(__A ) bert_tokenizer.save_pretrained(__A ) __UpperCamelCase = CustomTokenizerFast.from_pretrained(__A ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(__A ) __UpperCamelCase = AutoTokenizer.from_pretrained(__A ) self.assertIsInstance(__A , __A ) __UpperCamelCase = AutoTokenizer.from_pretrained(__A , use_fast=__A ) self.assertIsInstance(__A , __A ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] def _lowerCamelCase ( self : List[Any] ): # If remote code is not set, we will time out when asking whether to load the model. with self.assertRaises(__A ): __UpperCamelCase = AutoTokenizer.from_pretrained('hf-internal-testing/test_dynamic_tokenizer' ) # If remote code is disabled, we can't load this config. with self.assertRaises(__A ): __UpperCamelCase = AutoTokenizer.from_pretrained( 'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=__A ) __UpperCamelCase = AutoTokenizer.from_pretrained('hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=__A ) self.assertTrue(tokenizer.special_attribute_present ) # Test tokenizer can be reloaded. 
with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(__A ) __UpperCamelCase = AutoTokenizer.from_pretrained(__A , trust_remote_code=__A ) self.assertTrue(reloaded_tokenizer.special_attribute_present ) if is_tokenizers_available(): self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast' ) self.assertEqual(reloaded_tokenizer.__class__.__name__ , 'NewTokenizerFast' ) # Test we can also load the slow version __UpperCamelCase = AutoTokenizer.from_pretrained( 'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=__A , use_fast=__A ) self.assertTrue(tokenizer.special_attribute_present ) self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer' ) # Test tokenizer can be reloaded. with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(__A ) __UpperCamelCase = AutoTokenizer.from_pretrained(__A , trust_remote_code=__A , use_fast=__A ) self.assertEqual(reloaded_tokenizer.__class__.__name__ , 'NewTokenizer' ) self.assertTrue(reloaded_tokenizer.special_attribute_present ) else: self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer' ) self.assertEqual(reloaded_tokenizer.__class__.__name__ , 'NewTokenizer' ) @require_tokenizers def _lowerCamelCase ( self : Any ): class snake_case ( __lowerCamelCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Any =False class snake_case ( __lowerCamelCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : str =NewTokenizer SCREAMING_SNAKE_CASE_ : str =False try: AutoConfig.register('custom' , __A ) AutoTokenizer.register(__A , slow_tokenizer_class=__A ) AutoTokenizer.register(__A , fast_tokenizer_class=__A ) # If remote code is not set, the default is to use local __UpperCamelCase = AutoTokenizer.from_pretrained('hf-internal-testing/test_dynamic_tokenizer' ) self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast' ) self.assertFalse(tokenizer.special_attribute_present ) __UpperCamelCase = AutoTokenizer.from_pretrained('hf-internal-testing/test_dynamic_tokenizer' 
, use_fast=__A ) self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer' ) self.assertFalse(tokenizer.special_attribute_present ) # If remote code is disabled, we load the local one. __UpperCamelCase = AutoTokenizer.from_pretrained( 'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=__A ) self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast' ) self.assertFalse(tokenizer.special_attribute_present ) __UpperCamelCase = AutoTokenizer.from_pretrained( 'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=__A , use_fast=__A ) self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer' ) self.assertFalse(tokenizer.special_attribute_present ) # If remote is enabled, we load from the Hub __UpperCamelCase = AutoTokenizer.from_pretrained( 'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=__A ) self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast' ) self.assertTrue(tokenizer.special_attribute_present ) __UpperCamelCase = AutoTokenizer.from_pretrained( 'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=__A , use_fast=__A ) self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer' ) self.assertTrue(tokenizer.special_attribute_present ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] def _lowerCamelCase ( self : Dict ): __UpperCamelCase = AutoTokenizer.from_pretrained( 'hf-internal-testing/test_dynamic_tokenizer_legacy' , trust_remote_code=__A ) self.assertTrue(tokenizer.special_attribute_present ) if is_tokenizers_available(): self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast' ) # Test we can also load the slow version __UpperCamelCase = AutoTokenizer.from_pretrained( 'hf-internal-testing/test_dynamic_tokenizer_legacy' , trust_remote_code=__A , use_fast=__A ) self.assertTrue(tokenizer.special_attribute_present ) 
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer' ) else: self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer' ) def _lowerCamelCase ( self : int ): with self.assertRaisesRegex( __A , 'bert-base is not a local folder and is not a valid model identifier' ): __UpperCamelCase = AutoTokenizer.from_pretrained('bert-base' ) def _lowerCamelCase ( self : str ): with self.assertRaisesRegex( __A , R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ): __UpperCamelCase = AutoTokenizer.from_pretrained(__A , revision='aaaaaa' ) def _lowerCamelCase ( self : Dict ): # Make sure we have cached the tokenizer. __UpperCamelCase = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' ) with RequestCounter() as counter: __UpperCamelCase = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' ) self.assertEqual(counter.get_request_count , 0 ) self.assertEqual(counter.head_request_count , 1 ) self.assertEqual(counter.other_request_count , 0 )
53
def heaps(arr: list) -> list:
    """Return all permutations of *arr* using Heap's algorithm.

    The iterative form of Heap's algorithm generates each successive
    permutation by a single swap. *arr* is permuted in place while the
    snapshots are collected as tuples.

    >>> heaps([1, 2])
    [(1, 2), (2, 1)]

    Args:
        arr: list of items to permute (mutated during generation).

    Returns:
        List of all len(arr)! permutations, each as a tuple.
    """
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(n: int, arr: list):
        # `c` encodes the loop counters of the recursive formulation,
        # letting us run Heap's algorithm without recursion.
        c = [0] * n
        res.append(tuple(arr))
        i = 0
        while i < n:
            if c[i] < i:
                if i % 2 == 0:
                    # Even i: swap the first and the i-th element.
                    arr[0], arr[i] = arr[i], arr[0]
                else:
                    # Odd i: swap the c[i]-th and the i-th element.
                    arr[c[i]], arr[i] = arr[i], arr[c[i]]
                res.append(tuple(arr))
                c[i] += 1
                i = 0
            else:
                c[i] = 0
                i += 1

    generate(len(arr), arr)
    return res


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    print(heaps(arr))
300
0
import argparse
import json

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation


def get_upernet_config(model_name):
    """Build an UperNetConfig (Swin backbone, ADE20k labels) from the checkpoint name."""
    auxiliary_in_channels = 384
    window_size = 7
    if "tiny" in model_name:
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif "small" in model_name:
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif "base" in model_name:
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
        window_size = 12
        auxiliary_in_channels = 512
    elif "large" in model_name:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
        window_size = 12
        auxiliary_in_channels = 768

    # set label information (ADE20k: 150 classes)
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = SwinConfig(
        embed_dim=embed_dim,
        depths=depths,
        num_heads=num_heads,
        window_size=window_size,
        out_features=["stage1", "stage2", "stage3", "stage4"],
    )
    config = UperNetConfig(
        backbone_config=backbone_config,
        auxiliary_in_channels=auxiliary_in_channels,
        num_labels=num_labels,
        id2label=id2label,
        label2id=label2id,
    )
    return config


def create_rename_keys(config):
    """Return (old, new) key pairs mapping mmseg checkpoint names to HF parameter names."""
    rename_keys = []

    # fmt: off
    # stem
    rename_keys.append(("backbone.patch_embed.projection.weight", "backbone.embeddings.patch_embeddings.projection.weight"))
    rename_keys.append(("backbone.patch_embed.projection.bias", "backbone.embeddings.patch_embeddings.projection.bias"))
    rename_keys.append(("backbone.patch_embed.norm.weight", "backbone.embeddings.norm.weight"))
    rename_keys.append(("backbone.patch_embed.norm.bias", "backbone.embeddings.norm.bias"))
    # stages
    for i in range(len(config.backbone_config.depths)):
        for j in range(config.backbone_config.depths[i]):
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm1.weight", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm1.bias", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table", f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index", f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight", f"backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias", f"backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm2.weight", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm2.bias", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight", f"backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias", f"backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight", f"backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias", f"backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias"))

        # Only the first three stages have a patch-merging downsample layer.
        if i < 3:
            rename_keys.append((f"backbone.stages.{i}.downsample.reduction.weight", f"backbone.encoder.layers.{i}.downsample.reduction.weight"))
            rename_keys.append((f"backbone.stages.{i}.downsample.norm.weight", f"backbone.encoder.layers.{i}.downsample.norm.weight"))
            rename_keys.append((f"backbone.stages.{i}.downsample.norm.bias", f"backbone.encoder.layers.{i}.downsample.norm.bias"))
        rename_keys.append((f"backbone.norm{i}.weight", f"backbone.hidden_states_norms.stage{i+1}.weight"))
        rename_keys.append((f"backbone.norm{i}.bias", f"backbone.hidden_states_norms.stage{i+1}.bias"))

    # decode head
    rename_keys.extend(
        [
            ("decode_head.conv_seg.weight", "decode_head.classifier.weight"),
            ("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
            ("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"),
            ("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"),
        ]
    )
    # fmt: on

    return rename_keys


def rename_key(dct, old, new):
    """Move ``dct[old]`` to ``dct[new]`` in place."""
    val = dct.pop(old)
    dct[new] = val


def read_in_q_k_v(state_dict, backbone_config):
    """Split each fused qkv projection of the Swin backbone into separate q/k/v entries."""
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on


def correct_unfold_reduction_order(x):
    """Reorder a 2D patch-merging reduction weight from unfold order to HF order."""
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, 4, in_channel // 4)
    x = x[:, [0, 2, 1, 3], :].transpose(1, 2).reshape(out_channel, in_channel)
    return x


def reverse_correct_unfold_reduction_order(x):
    """Inverse of :func:`correct_unfold_reduction_order`."""
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, in_channel // 4, 4)
    x = x[:, :, [0, 2, 1, 3]].transpose(1, 2).reshape(out_channel, in_channel)
    return x


def correct_unfold_norm_order(x):
    """Reorder a 1D patch-merging norm parameter from unfold order to HF order."""
    in_channel = x.shape[0]
    x = x.reshape(4, in_channel // 4)
    x = x[[0, 2, 1, 3], :].transpose(0, 1).reshape(in_channel)
    return x


def reverse_correct_unfold_norm_order(x):
    """Inverse of :func:`correct_unfold_norm_order`."""
    in_channel = x.shape[0]
    x = x.reshape(in_channel // 4, 4)
    x = x[:, [0, 2, 1, 3]].transpose(0, 1).reshape(in_channel)
    return x


def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    """Download an mmseg Swin+UperNet checkpoint, convert it to HF format and verify outputs."""
    model_name_to_url = {
        "upernet-swin-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth",
        "upernet-swin-small": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth",
        "upernet-swin-base": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth",
        "upernet-swin-large": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth",
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", file_name=model_name)[
        "state_dict"
    ]

    for name, param in state_dict.items():
        print(name, param.shape)

    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config.backbone_config)

    # fix downsample parameters (mmseg stores them in "unfold" order)
    for key, value in state_dict.items():
        if "downsample" in key:
            if "reduction" in key:
                state_dict[key] = reverse_correct_unfold_reduction_order(value)
            if "norm" in key:
                state_dict[key] = reverse_correct_unfold_norm_order(value)

    model.load_state_dict(state_dict)

    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values

    with torch.no_grad():
        outputs = model(pixel_values)
    logits = outputs.logits

    print(logits.shape)
    print("First values of logits:", logits[0, 0, :3, :3])
    # assert values against the slices obtained from the original implementation
    if model_name == "upernet-swin-tiny":
        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        )
    elif model_name == "upernet-swin-small":
        expected_slice = torch.tensor(
            [[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]]
        )
    elif model_name == "upernet-swin-base":
        expected_slice = torch.tensor(
            [[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]]
        )
    elif model_name == "upernet-swin-large":
        expected_slice = torch.tensor(
            [[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]]
        )
    print("Logits:", outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor for {model_name} to hub")
        model.push_to_hub(f"openmmlab/{model_name}")
        processor.push_to_hub(f"openmmlab/{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="upernet-swin-tiny",
        type=str,
        choices=[f"upernet-swin-{size}" for size in ["tiny", "small", "base", "large"]],
        help="Name of the Swin + UperNet model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
49
import numpy as np
import qiskit


def bbaa(key_len: int = 8, seed: int | None = None) -> str:
    """Simulate the BB84 quantum key distribution protocol and return the key.

    Args:
        key_len: desired length of the generated key.
        seed: optional seed shared by the numpy RNG and the qiskit simulator,
            so runs are reproducible.

    Returns:
        A bit string of length ``key_len``.
    """
    rng = np.random.default_rng(seed=seed)

    # Roughly 25% of the qubits will contribute to the key.
    # So we take more than we need.
    num_qubits = 6 * key_len
    # Measurement basis for Alice's qubits.
    alice_basis = rng.integers(2, size=num_qubits)
    # The set of states Alice will prepare.
    alice_state = rng.integers(2, size=num_qubits)
    # Measurement basis for Bob's qubits.
    bob_basis = rng.integers(2, size=num_qubits)

    # Quantum Circuit to simulate BB84
    bbaa_circ = qiskit.QuantumCircuit(num_qubits, name="BB84")

    # Alice prepares her qubits according to rules above.
    for index, _ in enumerate(alice_basis):
        if alice_state[index] == 1:
            bbaa_circ.x(index)
        if alice_basis[index] == 1:
            bbaa_circ.h(index)
    bbaa_circ.barrier()

    # Bob measures the received qubits according to rules above.
    for index, _ in enumerate(bob_basis):
        if bob_basis[index] == 1:
            bbaa_circ.h(index)

    bbaa_circ.barrier()
    bbaa_circ.measure_all()

    # Simulate the quantum circuit.
    sim = qiskit.Aer.get_backend("aer_simulator")
    # We only need to run one shot because the key is unique.
    # Multiple shots will produce the same key.
    job = qiskit.execute(bbaa_circ, sim, shots=1, seed_simulator=seed)
    # Returns the result of measurement.
    result = job.result().get_counts(bbaa_circ).most_frequent()

    # Extracting the generated key from the simulation results.
    # Only keep measurement results where Alice and Bob chose the same basis.
    gen_key = "".join(
        [
            result_bit
            for alice_basis_bit, bob_basis_bit, result_bit in zip(alice_basis, bob_basis, result)
            if alice_basis_bit == bob_basis_bit
        ]
    )

    # Get final key. Pad with 0 if too short, otherwise truncate.
    key = gen_key[:key_len] if len(gen_key) >= key_len else gen_key.ljust(key_len, "0")
    return key


if __name__ == "__main__":
    print(f"The generated key is : {bbaa(8, seed=0)}")
    from doctest import testmod

    testmod()
49
1
"""Evaluate a causal language model's loss and perplexity on a streamed dataset."""
import logging

import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader

from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed


class ConstantLengthDataset(IterableDataset):
    """Iterable dataset that yields constant-length token tensors from a text stream.

    Text examples are buffered until roughly enough characters for
    ``num_of_sequences`` sequences are collected, then tokenized and chopped
    into chunks of exactly ``seq_length`` tokens (the trailing remainder is
    dropped).
    """

    def __init__(self, tokenizer, dataset, seq_length=1024, num_of_sequences=1024, chars_per_token=3.6):
        self.tokenizer = tokenizer
        # The BOS token is used to separate concatenated documents.
        self.concat_token_id = tokenizer.bos_token_id
        self.dataset = dataset
        self.seq_length = seq_length
        # Estimated number of characters needed to fill one tokenization batch.
        self.input_characters = seq_length * chars_per_token * num_of_sequences

    def __iter__(self):
        iterator = iter(self.dataset)
        more_examples = True
        while more_examples:
            buffer, buffer_len = [], 0
            # Fill the character buffer from the stream.
            while True:
                if buffer_len >= self.input_characters:
                    break
                try:
                    buffer.append(next(iterator)["content"])
                    buffer_len += len(buffer[-1])
                except StopIteration:
                    more_examples = False
                    break
            tokenized_inputs = self.tokenizer(buffer, truncation=False)["input_ids"]
            all_token_ids = []
            for tokenized_input in tokenized_inputs:
                all_token_ids.extend(tokenized_input + [self.concat_token_id])
            # Emit only full-length chunks; the remainder is discarded.
            for i in range(0, len(all_token_ids), self.seq_length):
                input_ids = all_token_ids[i : i + self.seq_length]
                if len(input_ids) == self.seq_length:
                    yield torch.tensor(input_ids)


def create_dataloader(args):
    """Stream the training split of ``args.dataset_name`` as a batched DataLoader."""
    ds_kwargs = {"streaming": True}
    train_data = load_dataset(args.dataset_name, split="train", **ds_kwargs)
    valid_dataset = ConstantLengthDataset(tokenizer, train_data, seq_length=args.seq_length)
    eval_dataloader = DataLoader(valid_dataset, batch_size=args.batch_size)
    return eval_dataloader


def evaluate(args):
    """Return (mean loss, perplexity) of the global `model` over `eval_dataloader`."""
    model.eval()
    losses = []
    for step, batch in enumerate(eval_dataloader):
        with torch.no_grad():
            outputs = model(batch, labels=batch)
        loss = outputs.loss.repeat(args.batch_size)
        losses.append(accelerator.gather(loss))
        if args.max_eval_steps > 0 and step >= args.max_eval_steps:
            break
    loss = torch.mean(torch.cat(losses))
    try:
        perplexity = torch.exp(loss)
    except OverflowError:
        perplexity = float("inf")
    return loss.item(), perplexity.item()


# Setup Accelerator
accelerator = Accelerator()

# Parse configuration
parser = HfArgumentParser(EvaluationArguments)
args = parser.parse_args()
set_seed(args.seed)

# Logging
logger = logging.getLogger(__name__)
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)

# Load model and tokenizer
model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)

# Load dataset and dataloader
eval_dataloader = create_dataloader(args)

# Prepare everything with our `accelerator`.
model, eval_dataloader = accelerator.prepare(model, eval_dataloader)

# Evaluate and save the last checkpoint
logger.info("Evaluating and saving model after training")
eval_loss, perplexity = evaluate(args)
logger.info(f"loss/eval: {eval_loss}, perplexity: {perplexity}")
102
"""Public re-exports for the download utilities package."""

# Declare the public API explicitly so `from ... import *` and doc tooling
# pick up exactly these names.
__all__ = [
    "DownloadConfig",
    "DownloadManager",
    "DownloadMode",
    "StreamingDownloadManager",
]

from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager
81
0
# Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from packaging import version from .. import __version__ from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD from .doc import ( add_code_sample_docstrings, add_end_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, copy_func, replace_return_docstrings, ) from .generic import ( ContextManagers, ExplicitEnum, ModelOutput, PaddingStrategy, TensorType, add_model_info_to_auto_map, cached_property, can_return_loss, expand_dims, find_labels, flatten_dict, infer_framework, is_jax_tensor, is_numpy_array, is_tensor, is_tf_symbolic_tensor, is_tf_tensor, is_torch_device, is_torch_dtype, is_torch_tensor, reshape, squeeze, strtobool, tensor_size, to_numpy, to_py_obj, transpose, working_or_temp_dir, ) from .hub import ( CLOUDFRONT_DISTRIB_PREFIX, DISABLE_TELEMETRY, HF_MODULES_CACHE, HUGGINGFACE_CO_PREFIX, HUGGINGFACE_CO_RESOLVE_ENDPOINT, PYTORCH_PRETRAINED_BERT_CACHE, PYTORCH_TRANSFORMERS_CACHE, S3_BUCKET_PREFIX, TRANSFORMERS_CACHE, TRANSFORMERS_DYNAMIC_MODULE_NAME, EntryNotFoundError, PushToHubMixin, RepositoryNotFoundError, RevisionNotFoundError, cached_file, default_cache_path, define_sagemaker_information, download_url, extract_commit_hash, get_cached_models, get_file_from_repo, get_full_repo_name, has_file, http_user_agent, is_offline_mode, is_remote_url, move_cache, send_example_telemetry, 
try_to_load_from_cache, ) from .import_utils import ( ENV_VARS_TRUE_AND_AUTO_VALUES, ENV_VARS_TRUE_VALUES, TORCH_FX_REQUIRED_VERSION, USE_JAX, USE_TF, USE_TORCH, DummyObject, OptionalDependencyNotAvailable, _LazyModule, ccl_version, direct_transformers_import, get_torch_version, is_accelerate_available, is_apex_available, is_bitsandbytes_available, is_bsa_available, is_coloredlogs_available, is_cython_available, is_datasets_available, is_decord_available, is_detectrona_available, is_faiss_available, is_flax_available, is_ftfy_available, is_in_notebook, is_ipex_available, is_jieba_available, is_jumanpp_available, is_kenlm_available, is_keras_nlp_available, is_librosa_available, is_natten_available, is_ninja_available, is_onnx_available, is_openai_available, is_optimum_available, is_pandas_available, is_peft_available, is_phonemizer_available, is_protobuf_available, is_psutil_available, is_pyanvml_available, is_pyctcdecode_available, is_pytesseract_available, is_pytest_available, is_pytorch_quantization_available, is_rjieba_available, is_sacremoses_available, is_safetensors_available, is_sagemaker_dp_enabled, is_sagemaker_mp_enabled, is_scipy_available, is_sentencepiece_available, is_seqio_available, is_sklearn_available, is_soundfile_availble, is_spacy_available, is_speech_available, is_sudachi_available, is_tensorflow_probability_available, is_tensorflow_text_available, is_tfaonnx_available, is_tf_available, is_timm_available, is_tokenizers_available, is_torch_available, is_torch_bfaa_available, is_torch_bfaa_cpu_available, is_torch_bfaa_gpu_available, is_torch_compile_available, is_torch_cuda_available, is_torch_fx_available, is_torch_fx_proxy, is_torch_mps_available, is_torch_neuroncore_available, is_torch_tensorrt_fx_available, is_torch_tfaa_available, is_torch_tpu_available, is_torchaudio_available, is_torchdistx_available, is_torchdynamo_available, is_torchvision_available, is_training_run_on_sagemaker, is_vision_available, requires_backends, 
torch_only_method,
)

# NOTE(review): every module-level constant below was name-mangled to `_A`;
# each assignment rebinds the same name, so only the last value survives and
# references such as `FEATURE_EXTRACTOR_NAME` / `SENTENCEPIECE_UNDERLINE`
# (originally defined by the earlier assignments) are unresolved in this view.
# Presumably these were WEIGHTS_NAME, CONFIG_NAME, etc. — TODO confirm upstream.
_A = '''pytorch_model.bin'''
_A = '''pytorch_model.bin.index.json'''
_A = '''adapter_config.json'''
_A = '''adapter_model.bin'''
_A = '''adapter_model.safetensors'''
_A = '''tf_model.h5'''
_A = '''tf_model.h5.index.json'''
_A = '''model.ckpt'''
_A = '''flax_model.msgpack'''
_A = '''flax_model.msgpack.index.json'''
_A = '''model.safetensors'''
_A = '''model.safetensors.index.json'''
_A = '''config.json'''
_A = '''preprocessor_config.json'''
_A = FEATURE_EXTRACTOR_NAME
_A = '''generation_config.json'''
_A = '''modelcard.json'''
_A = '''▁'''
_A = SENTENCEPIECE_UNDERLINE  # Kept for backward compatibility

_A = [
    [[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2  # Needs to have 0s and 1s only since XLM uses it for langs too.
_A = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
_A = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]


def __UpperCamelCase ( _A ):
    # Raise an ImportError when the installed transformers version is older
    # than the example's required minimum.
    # NOTE(review): the parameter was mangled to `_A` while the body still
    # reads `min_version` and `error_message`, which are unresolved here —
    # presumably the original signature was `check_min_version(min_version)`
    # and `error_message` was the variable assigned below. TODO confirm.
    if version.parse(_A ) < version.parse(_A ):
        if "dev" in min_version:
            lowerCAmelCase_ = (
                '''This example requires a source install from HuggingFace Transformers (see '''
                '''`https://huggingface.co/docs/transformers/installation#install-from-source`),'''
            )
        else:
            lowerCAmelCase_ = f"This example requires a minimum version of {min_version},"
        error_message += f" but the version found is {__version__}.\n"
        raise ImportError(
            error_message
            + '''Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other '''
            '''versions of HuggingFace Transformers.'''
        )
167
def hexagonal_numbers(length: int) -> list[int]:
    """Return the first *length* hexagonal numbers h(n) = n * (2n - 1).

    Args:
        length: How many hexagonal numbers to generate; must be a positive integer.

    Returns:
        The list [h(0), h(1), ..., h(length - 1)], starting with 0.

    Raises:
        ValueError: If *length* is not a positive integer.
    """
    # BUG FIX: the original signature took `_A` while the body used an
    # undefined `length`, and it called isinstance(_A, _A) (value checked
    # against itself).  The type check now also runs BEFORE the `<= 0`
    # comparison so non-numeric input raises ValueError, not TypeError.
    if not isinstance(length, int) or length <= 0:
        raise ValueError("Length must be a positive integer.")
    return [n * (2 * n - 1) for n in range(length)]


if __name__ == "__main__":
    # BUG FIX: the guard called `hexagonal_numbers`, which did not exist under
    # the obfuscated function name; the function is now actually so named.
    print(hexagonal_numbers(length=5))
    print(hexagonal_numbers(length=10))
167
1
# Lazy-import module definition for the ELECTRA model family.
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

# Maps submodule name -> public names it exports; consumed by _LazyModule.
# BUG FIX: this dict was assigned to a throwaway obfuscated name while
# _LazyModule below referenced an undefined `_import_structure`, and each
# optional-backend export list overwrote the same throwaway name instead of
# being registered under its submodule key.
_import_structure = {
    "configuration_electra": ["ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "ElectraConfig", "ElectraOnnxConfig"],
    "tokenization_electra": ["ElectraTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_electra_fast"] = ["ElectraTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_electra"] = [
        "ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ElectraForCausalLM",
        "ElectraForMaskedLM",
        "ElectraForMultipleChoice",
        "ElectraForPreTraining",
        "ElectraForQuestionAnswering",
        "ElectraForSequenceClassification",
        "ElectraForTokenClassification",
        "ElectraModel",
        "ElectraPreTrainedModel",
        "load_tf_weights_in_electra",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_electra"] = [
        "TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFElectraForMaskedLM",
        "TFElectraForMultipleChoice",
        "TFElectraForPreTraining",
        "TFElectraForQuestionAnswering",
        "TFElectraForSequenceClassification",
        "TFElectraForTokenClassification",
        "TFElectraModel",
        "TFElectraPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_electra"] = [
        "FlaxElectraForCausalLM",
        "FlaxElectraForMaskedLM",
        "FlaxElectraForMultipleChoice",
        "FlaxElectraForPreTraining",
        "FlaxElectraForQuestionAnswering",
        "FlaxElectraForSequenceClassification",
        "FlaxElectraForTokenClassification",
        "FlaxElectraModel",
        "FlaxElectraPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
    from .tokenization_electra import ElectraTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_electra_fast import ElectraTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_electra import (
            ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            ElectraForCausalLM,
            ElectraForMaskedLM,
            ElectraForMultipleChoice,
            ElectraForPreTraining,
            ElectraForQuestionAnswering,
            ElectraForSequenceClassification,
            ElectraForTokenClassification,
            ElectraModel,
            ElectraPreTrainedModel,
            load_tf_weights_in_electra,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_electra import (
            TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFElectraForMaskedLM,
            TFElectraForMultipleChoice,
            TFElectraForPreTraining,
            TFElectraForQuestionAnswering,
            TFElectraForSequenceClassification,
            TFElectraForTokenClassification,
            TFElectraModel,
            TFElectraPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_electra import (
            FlaxElectraForCausalLM,
            FlaxElectraForMaskedLM,
            FlaxElectraForMultipleChoice,
            FlaxElectraForPreTraining,
            FlaxElectraForQuestionAnswering,
            FlaxElectraForSequenceClassification,
            FlaxElectraForTokenClassification,
            FlaxElectraModel,
            FlaxElectraPreTrainedModel,
        )

else:
    import sys

    # Install the lazy proxy so heavy backends are imported on first access.
    # BUG FIX: the original bound the proxy to a throwaway variable instead of
    # replacing the module in sys.modules, so laziness never took effect.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
87
# Lazy-import module definition for the BLIP model family.
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)

# Maps submodule name -> public names it exports; consumed by _LazyModule.
# BUG FIX: this dict was assigned to a throwaway obfuscated name while
# _LazyModule below referenced an undefined `_import_structure`, and each
# optional-backend export list overwrote another throwaway name instead of
# being registered under its submodule key.
_import_structure = {
    "configuration_blip": [
        "BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlipConfig",
        "BlipTextConfig",
        "BlipVisionConfig",
    ],
    "processing_blip": ["BlipProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_blip"] = ["BlipImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blip"] = [
        "BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlipModel",
        "BlipPreTrainedModel",
        "BlipForConditionalGeneration",
        "BlipForQuestionAnswering",
        "BlipVisionModel",
        "BlipTextModel",
        "BlipForImageTextRetrieval",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_blip"] = [
        "TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFBlipModel",
        "TFBlipPreTrainedModel",
        "TFBlipForConditionalGeneration",
        "TFBlipForQuestionAnswering",
        "TFBlipVisionModel",
        "TFBlipTextModel",
        "TFBlipForImageTextRetrieval",
    ]

if TYPE_CHECKING:
    from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
    from .processing_blip import BlipProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_blip import BlipImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blip import (
            BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlipForConditionalGeneration,
            BlipForImageTextRetrieval,
            BlipForQuestionAnswering,
            BlipModel,
            BlipPreTrainedModel,
            BlipTextModel,
            BlipVisionModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_blip import (
            TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFBlipForConditionalGeneration,
            TFBlipForImageTextRetrieval,
            TFBlipForQuestionAnswering,
            TFBlipModel,
            TFBlipPreTrainedModel,
            TFBlipTextModel,
            TFBlipVisionModel,
        )

else:
    import sys

    # Install the lazy proxy so heavy backends are imported on first access.
    # BUG FIX: the original bound the proxy to a throwaway variable instead of
    # replacing the module in sys.modules, so laziness never took effect.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
14
0
import random import unittest import numpy as np import torch from diffusers import ( DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionUpscalePipeline, PNDMScheduler, ) from diffusers.utils import floats_tensor from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class lowercase_ ( __SCREAMING_SNAKE_CASE , unittest.TestCase ): _lowerCamelCase = 'ssube/stable-diffusion-x4-upscaler-onnx' def UpperCamelCase ( self , lowercase_=0 ): _snake_case : Dict = floats_tensor((1, 3, 128, 128) , rng=random.Random(UpperCamelCase__ ) ) _snake_case : Any = torch.manual_seed(UpperCamelCase__ ) _snake_case : int = { '''prompt''': '''A painting of a squirrel eating a burger''', '''image''': image, '''generator''': generator, '''num_inference_steps''': 3, '''guidance_scale''': 7.5, '''output_type''': '''numpy''', } return inputs def UpperCamelCase ( self ): _snake_case : Optional[Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) pipe.set_progress_bar_config(disable=UpperCamelCase__ ) _snake_case : int = self.get_dummy_inputs() _snake_case : List[str] = pipe(**UpperCamelCase__ ).images _snake_case : List[str] = image[0, -3:, -3:, -1].flatten() # started as 128, should now be 512 assert image.shape == (1, 512, 512, 3) _snake_case : int = np.array( [0.6_974_782, 0.68_902_093, 0.70_135_885, 0.7_583_618, 0.7_804_545, 0.7_854_912, 0.78_667_426, 0.78_743_863, 0.78_070_223] ) assert np.abs(image_slice - expected_slice ).max() < 1e-1 def UpperCamelCase ( self ): _snake_case : Any = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) _snake_case : List[str] = PNDMScheduler.from_config(pipe.scheduler.config , 
skip_prk_steps=UpperCamelCase__ ) pipe.set_progress_bar_config(disable=UpperCamelCase__ ) _snake_case : Optional[int] = self.get_dummy_inputs() _snake_case : Union[str, Any] = pipe(**UpperCamelCase__ ).images _snake_case : Optional[Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) _snake_case : Any = np.array( [0.6_898_892, 0.59_240_556, 0.52_499_527, 0.58_866_215, 0.52_258_235, 0.52_572_715, 0.62_414_473, 0.6_174_387, 0.6_214_964] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def UpperCamelCase ( self ): _snake_case : Optional[int] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) _snake_case : List[Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=UpperCamelCase__ ) _snake_case : Dict = self.get_dummy_inputs() _snake_case : List[Any] = pipe(**UpperCamelCase__ ).images _snake_case : Union[str, Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) _snake_case : Optional[Any] = np.array( [0.7_659_278, 0.76_437_664, 0.75_579_107, 0.7_691_116, 0.77_666_986, 0.7_727_672, 0.7_758_664, 0.7_812_226, 0.76_942_515] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def UpperCamelCase ( self ): _snake_case : str = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) _snake_case : Optional[int] = EulerDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=UpperCamelCase__ ) _snake_case : Dict = self.get_dummy_inputs() _snake_case : Any = pipe(**UpperCamelCase__ ).images _snake_case : List[Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) _snake_case : Optional[Any] = np.array( [0.6_974_782, 0.68_902_093, 0.70_135_885, 0.7_583_618, 0.7_804_545, 0.7_854_912, 0.78_667_426, 0.78_743_863, 0.78_070_223] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def 
UpperCamelCase ( self ): _snake_case : List[Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) _snake_case : Optional[Any] = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=UpperCamelCase__ ) _snake_case : Any = self.get_dummy_inputs() _snake_case : Union[str, Any] = pipe(**UpperCamelCase__ ).images _snake_case : str = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) _snake_case : Any = np.array( [0.77_424_496, 0.773_601, 0.7_645_288, 0.7_769_598, 0.7_772_739, 0.7_738_688, 0.78_187_233, 0.77_879_584, 0.767_043] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 @nightly @require_onnxruntime @require_torch_gpu class lowercase_ ( unittest.TestCase ): @property def UpperCamelCase ( self ): return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def UpperCamelCase ( self ): _snake_case : List[str] = ort.SessionOptions() _snake_case : List[str] = False return options def UpperCamelCase ( self ): _snake_case : Any = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/img2img/sketch-mountains-input.jpg" ) _snake_case : Any = init_image.resize((128, 128) ) # using the PNDM scheduler by default _snake_case : Union[str, Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained( "ssube/stable-diffusion-x4-upscaler-onnx" , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=UpperCamelCase__ ) _snake_case : Union[str, Any] = '''A fantasy landscape, trending on artstation''' _snake_case : str = torch.manual_seed(0 ) _snake_case : List[str] = pipe( prompt=UpperCamelCase__ , image=UpperCamelCase__ , guidance_scale=7.5 , num_inference_steps=10 , generator=UpperCamelCase__ , output_type="np" , ) _snake_case : int = output.images _snake_case : str = images[0, 
255:258, 383:386, -1] assert images.shape == (1, 512, 512, 3) _snake_case : Optional[int] = np.array([0.4_883, 0.4_947, 0.4_980, 0.4_975, 0.4_982, 0.4_980, 0.5_000, 0.5_006, 0.4_972] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2 def UpperCamelCase ( self ): _snake_case : Tuple = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/img2img/sketch-mountains-input.jpg" ) _snake_case : List[Any] = init_image.resize((128, 128) ) _snake_case : Optional[Any] = LMSDiscreteScheduler.from_pretrained( "ssube/stable-diffusion-x4-upscaler-onnx" , subfolder="scheduler" ) _snake_case : Union[str, Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained( "ssube/stable-diffusion-x4-upscaler-onnx" , scheduler=UpperCamelCase__ , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=UpperCamelCase__ ) _snake_case : Optional[Any] = '''A fantasy landscape, trending on artstation''' _snake_case : List[Any] = torch.manual_seed(0 ) _snake_case : List[str] = pipe( prompt=UpperCamelCase__ , image=UpperCamelCase__ , guidance_scale=7.5 , num_inference_steps=20 , generator=UpperCamelCase__ , output_type="np" , ) _snake_case : str = output.images _snake_case : int = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 512, 3) _snake_case : int = np.array( [0.50_173_753, 0.50_223_356, 0.502_039, 0.50_233_036, 0.5_023_725, 0.5_022_601, 0.5_018_758, 0.50_234_085, 0.50_241_566] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
367
import tempfile import numpy as np import torch from transformers import AutoTokenizer, TaEncoderModel from diffusers import DDPMScheduler, UNetaDConditionModel from diffusers.models.attention_processor import AttnAddedKVProcessor from diffusers.pipelines.deepfloyd_if import IFWatermarker from diffusers.utils.testing_utils import torch_device from ..test_pipelines_common import to_np class lowercase_ : def UpperCamelCase ( self ): torch.manual_seed(0 ) _snake_case : Tuple = TaEncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5" ) torch.manual_seed(0 ) _snake_case : List[Any] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5" ) torch.manual_seed(0 ) _snake_case : Optional[int] = UNetaDConditionModel( sample_size=32 , layers_per_block=1 , block_out_channels=[32, 64] , down_block_types=[ "ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D", ] , mid_block_type="UNetMidBlock2DSimpleCrossAttn" , up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"] , in_channels=3 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="text" , addition_embed_type_num_heads=2 , cross_attention_norm="group_norm" , resnet_time_scale_shift="scale_shift" , act_fn="gelu" , ) unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests torch.manual_seed(0 ) _snake_case : str = DDPMScheduler( num_train_timesteps=1_000 , beta_schedule="squaredcos_cap_v2" , beta_start=0.0_001 , beta_end=0.02 , thresholding=lowercase_ , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type="epsilon" , variance_type="learned_range" , ) torch.manual_seed(0 ) _snake_case : int = IFWatermarker() return { "text_encoder": text_encoder, "tokenizer": tokenizer, "unet": unet, "scheduler": scheduler, "watermarker": watermarker, "safety_checker": None, "feature_extractor": None, } def UpperCamelCase ( self ): torch.manual_seed(0 ) _snake_case : List[str] = 
TaEncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5" ) torch.manual_seed(0 ) _snake_case : Tuple = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5" ) torch.manual_seed(0 ) _snake_case : Any = UNetaDConditionModel( sample_size=32 , layers_per_block=[1, 2] , block_out_channels=[32, 64] , down_block_types=[ "ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D", ] , mid_block_type="UNetMidBlock2DSimpleCrossAttn" , up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"] , in_channels=6 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="text" , addition_embed_type_num_heads=2 , cross_attention_norm="group_norm" , resnet_time_scale_shift="scale_shift" , act_fn="gelu" , class_embed_type="timestep" , mid_block_scale_factor=1.414 , time_embedding_act_fn="gelu" , time_embedding_dim=32 , ) unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests torch.manual_seed(0 ) _snake_case : Union[str, Any] = DDPMScheduler( num_train_timesteps=1_000 , beta_schedule="squaredcos_cap_v2" , beta_start=0.0_001 , beta_end=0.02 , thresholding=lowercase_ , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type="epsilon" , variance_type="learned_range" , ) torch.manual_seed(0 ) _snake_case : List[str] = DDPMScheduler( num_train_timesteps=1_000 , beta_schedule="squaredcos_cap_v2" , beta_start=0.0_001 , beta_end=0.02 , ) torch.manual_seed(0 ) _snake_case : Dict = IFWatermarker() return { "text_encoder": text_encoder, "tokenizer": tokenizer, "unet": unet, "scheduler": scheduler, "image_noising_scheduler": image_noising_scheduler, "watermarker": watermarker, "safety_checker": None, "feature_extractor": None, } def UpperCamelCase ( self ): _snake_case : List[Any] = self.get_dummy_components() _snake_case : List[Any] = self.pipeline_class(**lowercase_ ) pipe.to(lowercase_ ) pipe.set_progress_bar_config(disable=lowercase_ ) _snake_case : Union[str, Any] = 
self.get_dummy_inputs(lowercase_ ) _snake_case : Union[str, Any] = inputs["prompt"] _snake_case : Dict = inputs["generator"] _snake_case : Any = inputs["num_inference_steps"] _snake_case : Union[str, Any] = inputs["output_type"] if "image" in inputs: _snake_case : int = inputs["image"] else: _snake_case : Union[str, Any] = None if "mask_image" in inputs: _snake_case : int = inputs["mask_image"] else: _snake_case : List[str] = None if "original_image" in inputs: _snake_case : Tuple = inputs["original_image"] else: _snake_case : Any = None _snake_case ,_snake_case : Optional[int] = pipe.encode_prompt(lowercase_ ) # inputs with prompt converted to embeddings _snake_case : Dict = { "prompt_embeds": prompt_embeds, "negative_prompt_embeds": negative_prompt_embeds, "generator": generator, "num_inference_steps": num_inference_steps, "output_type": output_type, } if image is not None: _snake_case : int = image if mask_image is not None: _snake_case : int = mask_image if original_image is not None: _snake_case : Any = original_image # set all optional components to None for optional_component in pipe._optional_components: setattr(lowercase_ , lowercase_ , lowercase_ ) _snake_case : Optional[int] = pipe(**lowercase_ )[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(lowercase_ ) _snake_case : Any = self.pipeline_class.from_pretrained(lowercase_ ) pipe_loaded.to(lowercase_ ) pipe_loaded.set_progress_bar_config(disable=lowercase_ ) pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests for optional_component in pipe._optional_components: self.assertTrue( getattr(lowercase_ , lowercase_ ) is None , f"""`{optional_component}` did not stay set to None after loading.""" , ) _snake_case : int = self.get_dummy_inputs(lowercase_ ) _snake_case : Optional[int] = inputs["generator"] _snake_case : List[Any] = inputs["num_inference_steps"] _snake_case : Tuple = inputs["output_type"] # inputs with prompt converted to embeddings 
_snake_case : int = { "prompt_embeds": prompt_embeds, "negative_prompt_embeds": negative_prompt_embeds, "generator": generator, "num_inference_steps": num_inference_steps, "output_type": output_type, } if image is not None: _snake_case : int = image if mask_image is not None: _snake_case : str = mask_image if original_image is not None: _snake_case : int = original_image _snake_case : Optional[Any] = pipe_loaded(**lowercase_ )[0] _snake_case : Dict = np.abs(to_np(lowercase_ ) - to_np(lowercase_ ) ).max() self.assertLess(lowercase_ , 1e-4 ) def UpperCamelCase ( self ): _snake_case : Tuple = self.get_dummy_components() _snake_case : Any = self.pipeline_class(**lowercase_ ) pipe.to(lowercase_ ) pipe.set_progress_bar_config(disable=lowercase_ ) _snake_case : int = self.get_dummy_inputs(lowercase_ ) _snake_case : List[str] = pipe(**lowercase_ )[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(lowercase_ ) _snake_case : List[Any] = self.pipeline_class.from_pretrained(lowercase_ ) pipe_loaded.to(lowercase_ ) pipe_loaded.set_progress_bar_config(disable=lowercase_ ) pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests _snake_case : Optional[Any] = self.get_dummy_inputs(lowercase_ ) _snake_case : int = pipe_loaded(**lowercase_ )[0] _snake_case : Tuple = np.abs(to_np(lowercase_ ) - to_np(lowercase_ ) ).max() self.assertLess(lowercase_ , 1e-4 )
284
0
"""simple docstring""" import os from itertools import chain from random import randrange, shuffle import pytest from .sola import PokerHand __SCREAMING_SNAKE_CASE =( "4S 3H 2C 7S 5H", "9D 8H 2C 6S 7H", "2D 6D 9D TH 7D", "TC 8C 2S JH 6C", "JH 8S TH AH QH", "TS KS 5S 9S AC", "KD 6S 9D TH AD", "KS 8D 4D 9S 4S", # pair "8C 4S KH JS 4D", # pair "QH 8H KD JH 8S", # pair "KC 4H KS 2H 8D", # pair "KD 4S KC 3H 8S", # pair "AH 8S AS KC JH", # pair "3H 4C 4H 3S 2H", # 2 pairs "5S 5D 2C KH KH", # 2 pairs "3C KH 5D 5S KH", # 2 pairs "AS 3C KH AD KH", # 2 pairs "7C 7S 3S 7H 5S", # 3 of a kind "7C 7S KH 2H 7H", # 3 of a kind "AC KH QH AH AS", # 3 of a kind "2H 4D 3C AS 5S", # straight (low ace) "3C 5C 4C 2C 6H", # straight "6S 8S 7S 5H 9H", # straight "JS QS 9H TS KH", # straight "QC KH TS JS AH", # straight (high ace) "8C 9C 5C 3C TC", # flush "3S 8S 9S 5S KS", # flush "4C 5C 9C 8C KC", # flush "JH 8H AH KH QH", # flush "3D 2H 3H 2C 2D", # full house "2H 2C 3S 3H 3D", # full house "KH KC 3S 3H 3D", # full house "JC 6H JS JD JH", # 4 of a kind "JC 7H JS JD JH", # 4 of a kind "JC KH JS JD JH", # 4 of a kind "2S AS 4S 5S 3S", # straight flush (low ace) "2D 6D 3D 4D 5D", # straight flush "5C 6C 3C 7C 4C", # straight flush "JH 9H TH KH QH", # straight flush "JH AH TH KH QH", # royal flush (high ace straight flush) ) __SCREAMING_SNAKE_CASE =( ("2H 3H 4H 5H 6H", "KS AS TS QS JS", "Loss"), ("2H 3H 4H 5H 6H", "AS AD AC AH JD", "Win"), ("AS AH 2H AD AC", "JS JD JC JH 3D", "Win"), ("2S AH 2H AS AC", "JS JD JC JH AD", "Loss"), ("2S AH 2H AS AC", "2H 3H 5H 6H 7H", "Win"), ("AS 3S 4S 8S 2S", "2H 3H 5H 6H 7H", "Win"), ("2H 3H 5H 6H 7H", "2S 3H 4H 5S 6C", "Win"), ("2S 3H 4H 5S 6C", "3D 4C 5H 6H 2S", "Tie"), ("2S 3H 4H 5S 6C", "AH AC 5H 6H AS", "Win"), ("2S 2H 4H 5S 4C", "AH AC 5H 6H AS", "Loss"), ("2S 2H 4H 5S 4C", "AH AC 5H 6H 7S", "Win"), ("6S AD 7H 4S AS", "AH AC 5H 6H 7S", "Loss"), ("2S AH 4H 5S KC", "AH AC 5H 6H 7S", "Loss"), ("2S 3H 6H 7S 9C", "7H 3C TH 6H 9S", "Loss"), ("4S 5H 6H TS 
AC", "3S 5H 6H TS AC", "Win"), ("2S AH 4H 5S 6C", "AD 4C 5H 6H 2C", "Tie"), ("AS AH 3H AD AC", "AS AH 2H AD AC", "Win"), ("AH AC 5H 5C QS", "AH AC 5H 5C KS", "Loss"), ("AH AC 5H 5C QS", "KH KC 5H 5C QS", "Win"), ("7C 7S KH 2H 7H", "3C 3S AH 2H 3H", "Win"), ("3C 3S AH 2H 3H", "7C 7S KH 2H 7H", "Loss"), ("6H 5H 4H 3H 2H", "5H 4H 3H 2H AH", "Win"), ("5H 4H 3H 2H AH", "5H 4H 3H 2H AH", "Tie"), ("5H 4H 3H 2H AH", "6H 5H 4H 3H 2H", "Loss"), ("AH AD KS KC AC", "AH KD KH AC KC", "Win"), ("2H 4D 3C AS 5S", "2H 4D 3C 6S 5S", "Loss"), ("2H 3S 3C 3H 2S", "3S 3C 2S 2H 2D", "Win"), ("4D 6D 5D 2D JH", "3S 8S 3H TC KH", "Loss"), ("4S 6C 8S 3S 7S", "AD KS 2D 7D 7C", "Loss"), ("6S 4C 7H 8C 3H", "5H JC AH 9D 9C", "Loss"), ("9D 9H JH TC QH", "3C 2S JS 5C 7H", "Win"), ("2H TC 8S AD 9S", "4H TS 7H 2C 5C", "Win"), ("9D 3S 2C 7S 7C", "JC TD 3C TC 9H", "Loss"), ) __SCREAMING_SNAKE_CASE =( ("2H 3H 4H 5H 6H", True), ("AS AH 2H AD AC", False), ("2H 3H 5H 6H 7H", True), ("KS AS TS QS JS", True), ("8H 9H QS JS TH", False), ("AS 3S 4S 8S 2S", True), ) __SCREAMING_SNAKE_CASE =( ("2H 3H 4H 5H 6H", True), ("AS AH 2H AD AC", False), ("2H 3H 5H 6H 7H", False), ("KS AS TS QS JS", True), ("8H 9H QS JS TH", True), ) __SCREAMING_SNAKE_CASE =( ("2H 4D 3C AS 5S", True, [5, 4, 3, 2, 14]), ("2H 5D 3C AS 5S", False, [14, 5, 5, 3, 2]), ("JH QD KC AS TS", False, [14, 13, 12, 11, 10]), ("9D 3S 2C 7S 7C", False, [9, 7, 7, 3, 2]), ) __SCREAMING_SNAKE_CASE =( ("JH AH TH KH QH", 0), ("JH 9H TH KH QH", 0), ("JC KH JS JD JH", 7), ("KH KC 3S 3H 3D", 6), ("8C 9C 5C 3C TC", 0), ("JS QS 9H TS KH", 0), ("7C 7S KH 2H 7H", 3), ("3C KH 5D 5S KH", 2), ("QH 8H KD JH 8S", 1), ("2D 6D 9D TH 7D", 0), ) __SCREAMING_SNAKE_CASE =( ("JH AH TH KH QH", 23), ("JH 9H TH KH QH", 22), ("JC KH JS JD JH", 21), ("KH KC 3S 3H 3D", 20), ("8C 9C 5C 3C TC", 19), ("JS QS 9H TS KH", 18), ("7C 7S KH 2H 7H", 17), ("3C KH 5D 5S KH", 16), ("QH 8H KD JH 8S", 15), ("2D 6D 9D TH 7D", 14), ) def lowercase__( ): lowercase_ , lowercase_ : List[Any] = 
randrange(len(__SCREAMING_SNAKE_CASE ) ), randrange(len(__SCREAMING_SNAKE_CASE ) ) lowercase_ : List[Any] = ['Loss', 'Tie', 'Win'][(play >= oppo) + (play > oppo)] lowercase_ , lowercase_ : List[str] = SORTED_HANDS[play], SORTED_HANDS[oppo] return hand, other, expected def lowercase__( __SCREAMING_SNAKE_CASE : int = 1_00 ): return (generate_random_hand() for _ in range(__SCREAMING_SNAKE_CASE )) @pytest.mark.parametrize('hand, expected' , __SCREAMING_SNAKE_CASE ) def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : List[str] ): assert PokerHand(__SCREAMING_SNAKE_CASE )._is_flush() == expected @pytest.mark.parametrize('hand, expected' , __SCREAMING_SNAKE_CASE ) def lowercase__( __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Any ): assert PokerHand(__SCREAMING_SNAKE_CASE )._is_straight() == expected @pytest.mark.parametrize('hand, expected, card_values' , __SCREAMING_SNAKE_CASE ) def lowercase__( __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Dict ): lowercase_ : Dict = PokerHand(__SCREAMING_SNAKE_CASE ) assert player._is_five_high_straight() == expected assert player._card_values == card_values @pytest.mark.parametrize('hand, expected' , __SCREAMING_SNAKE_CASE ) def lowercase__( __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : str ): assert PokerHand(__SCREAMING_SNAKE_CASE )._is_same_kind() == expected @pytest.mark.parametrize('hand, expected' , __SCREAMING_SNAKE_CASE ) def lowercase__( __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Optional[int] ): assert PokerHand(__SCREAMING_SNAKE_CASE )._hand_type == expected @pytest.mark.parametrize('hand, other, expected' , __SCREAMING_SNAKE_CASE ) def lowercase__( __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Dict ): assert PokerHand(__SCREAMING_SNAKE_CASE ).compare_with(PokerHand(__SCREAMING_SNAKE_CASE ) ) == expected @pytest.mark.parametrize('hand, 
other, expected' , generate_random_hands() ) def lowercase__( __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : int ): assert PokerHand(__SCREAMING_SNAKE_CASE ).compare_with(PokerHand(__SCREAMING_SNAKE_CASE ) ) == expected def lowercase__( ): lowercase_ : Optional[int] = [PokerHand(__SCREAMING_SNAKE_CASE ) for hand in SORTED_HANDS] lowercase_ : Tuple = poker_hands.copy() shuffle(__SCREAMING_SNAKE_CASE ) lowercase_ : Any = chain(sorted(__SCREAMING_SNAKE_CASE ) ) for index, hand in enumerate(__SCREAMING_SNAKE_CASE ): assert hand == poker_hands[index] def lowercase__( ): # Test that five high straights are compared correctly. lowercase_ : Union[str, Any] = [PokerHand('2D AC 3H 4H 5S' ), PokerHand('2S 3H 4H 5S 6C' )] pokerhands.sort(reverse=__SCREAMING_SNAKE_CASE ) assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C" def lowercase__( ): # Multiple calls to five_high_straight function should still return True # and shouldn't mutate the list in every call other than the first. lowercase_ : int = PokerHand('2C 4S AS 3D 5C' ) lowercase_ : Union[str, Any] = True lowercase_ : Optional[Any] = [5, 4, 3, 2, 14] for _ in range(10 ): assert pokerhand._is_five_high_straight() == expected assert pokerhand._card_values == expected_card_values def lowercase__( ): # Problem number 54 from Project Euler # Testing from poker_hands.txt file lowercase_ : Any = 0 lowercase_ : Union[str, Any] = os.path.abspath(os.path.dirname(__SCREAMING_SNAKE_CASE ) ) lowercase_ : List[str] = os.path.join(__SCREAMING_SNAKE_CASE , 'poker_hands.txt' ) with open(__SCREAMING_SNAKE_CASE ) as file_hand: for line in file_hand: lowercase_ : str = line[:14].strip() lowercase_ : Optional[Any] = line[15:].strip() lowercase_ , lowercase_ : List[str] = PokerHand(__SCREAMING_SNAKE_CASE ), PokerHand(__SCREAMING_SNAKE_CASE ) lowercase_ : List[Any] = player.compare_with(__SCREAMING_SNAKE_CASE ) if output == "Win": answer += 1 assert answer == 3_76
213
"""simple docstring""" from argparse import ArgumentParser from datasets.commands.convert import ConvertCommand from datasets.commands.dummy_data import DummyDataCommand from datasets.commands.env import EnvironmentCommand from datasets.commands.run_beam import RunBeamCommand from datasets.commands.test import TestCommand from datasets.utils.logging import set_verbosity_info def lowercase__( __SCREAMING_SNAKE_CASE : Optional[int] ): return {key.lstrip('-' ): value for key, value in zip(unknown_args[::2] , unknown_args[1::2] )} def lowercase__( ): lowercase_ : Any = ArgumentParser( 'HuggingFace Datasets CLI tool' , usage='datasets-cli <command> [<args>]' , allow_abbrev=__SCREAMING_SNAKE_CASE ) lowercase_ : Tuple = parser.add_subparsers(help='datasets-cli command helpers' ) set_verbosity_info() # Register commands ConvertCommand.register_subcommand(__SCREAMING_SNAKE_CASE ) EnvironmentCommand.register_subcommand(__SCREAMING_SNAKE_CASE ) TestCommand.register_subcommand(__SCREAMING_SNAKE_CASE ) RunBeamCommand.register_subcommand(__SCREAMING_SNAKE_CASE ) DummyDataCommand.register_subcommand(__SCREAMING_SNAKE_CASE ) # Parse args lowercase_ , lowercase_ : Dict = parser.parse_known_args() if not hasattr(__SCREAMING_SNAKE_CASE , 'func' ): parser.print_help() exit(1 ) lowercase_ : int = parse_unknown_args(__SCREAMING_SNAKE_CASE ) # Run lowercase_ : List[Any] = args.func(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) service.run() if __name__ == "__main__": main()
213
1
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available

# Lazy-import table for the speech-encoder-decoder sub-package.
# NOTE(review): the obfuscated original assigned every conditional entry to a
# junk ``__magic_name__`` variable while still passing ``_import_structure`` to
# ``_LazyModule`` — the dict was never populated and the final lazy module was
# never installed into ``sys.modules``. Both are restored below.
_import_structure = {"configuration_speech_encoder_decoder": ["SpeechEncoderDecoderConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch is present: expose the PyTorch model.
    _import_structure["modeling_speech_encoder_decoder"] = ["SpeechEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Flax is present: expose the Flax model.
    _import_structure["modeling_flax_speech_encoder_decoder"] = ["FlaxSpeechEncoderDecoderModel"]

if TYPE_CHECKING:
    from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel

else:
    import sys

    # Replace this module with a lazy proxy so heavy imports happen on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
138
from manim import * class snake_case__ ( _lowerCAmelCase ): def __magic_name__ ( self ) -> Dict: __magic_name__ : int = Rectangle(height=0.5 , width=0.5 ) __magic_name__ : Optional[int] = Rectangle(height=0.2_5 , width=0.2_5 ) __magic_name__ : str = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0 ) __magic_name__ : List[Any] = [mem.copy() for i in range(6 )] __magic_name__ : int = [mem.copy() for i in range(6 )] __magic_name__ : Tuple = VGroup(*lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 ) __magic_name__ : List[str] = VGroup(*lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 ) __magic_name__ : str = VGroup(lowerCAmelCase__ , lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 ) __magic_name__ : Union[str, Any] = Text("""CPU""" , font_size=24 ) __magic_name__ : Tuple = Group(lowerCAmelCase__ , lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0.5 , aligned_edge=lowerCAmelCase__ ) cpu.move_to([-2.5, -0.5, 0] ) self.add(lowerCAmelCase__ ) __magic_name__ : Any = [mem.copy() for i in range(4 )] __magic_name__ : List[Any] = VGroup(*lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 ) __magic_name__ : Tuple = Text("""GPU""" , font_size=24 ) __magic_name__ : Tuple = Group(lowerCAmelCase__ , lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0.5 , aligned_edge=lowerCAmelCase__ ) gpu.move_to([-1, -1, 0] ) self.add(lowerCAmelCase__ ) __magic_name__ : Union[str, Any] = [mem.copy() for i in range(6 )] __magic_name__ : Union[str, Any] = VGroup(*lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 ) __magic_name__ : str = Text("""Model""" , font_size=24 ) __magic_name__ : Optional[int] = Group(lowerCAmelCase__ , lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0.5 , aligned_edge=lowerCAmelCase__ ) model.move_to([3, -1.0, 0] ) self.add(lowerCAmelCase__ ) __magic_name__ : str = [] __magic_name__ : Tuple = [] __magic_name__ : Union[str, Any] = [] for i, rect in enumerate(lowerCAmelCase__ ): rect.set_stroke(lowerCAmelCase__ ) __magic_name__ : Optional[Any] 
= Rectangle(height=0.4_6 / 4 , width=0.4_6 / 3 ).set_stroke(width=0.0 ).set_fill(lowerCAmelCase__ , opacity=0.7 ) if i == 0: cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.0_2 , direction=lowerCAmelCase__ ) cpu_target.set_x(cpu_target.get_x() + 0.1 ) elif i == 3: cpu_target.next_to(model_cpu_arr[0] , direction=lowerCAmelCase__ , buff=0.0 ) else: cpu_target.next_to(model_cpu_arr[i - 1] , direction=lowerCAmelCase__ , buff=0.0 ) self.add(lowerCAmelCase__ ) model_cpu_arr.append(lowerCAmelCase__ ) self.add(*lowerCAmelCase__ , *lowerCAmelCase__ , *lowerCAmelCase__ ) __magic_name__ : Optional[Any] = [mem.copy() for i in range(6 )] __magic_name__ : Optional[Any] = VGroup(*lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 ) __magic_name__ : Any = Text("""Loaded Checkpoint""" , font_size=24 ) __magic_name__ : Optional[int] = Group(lowerCAmelCase__ , lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0.5 , aligned_edge=lowerCAmelCase__ ) checkpoint.move_to([3, 0.5, 0] ) self.add(lowerCAmelCase__ ) __magic_name__ : Optional[int] = [] __magic_name__ : Tuple = [] for i, rect in enumerate(lowerCAmelCase__ ): __magic_name__ : Dict = fill.copy().set_fill(lowerCAmelCase__ , opacity=0.7 ) target.move_to(lowerCAmelCase__ ) ckpt_arr.append(lowerCAmelCase__ ) __magic_name__ : int = target.copy() if i < 5: cpu_target.move_to(cpu_left_col_base[i + 1] ) else: cpu_target.move_to(cpu_right_col_base[i - 5] ) ckpt_cpu_arr.append(lowerCAmelCase__ ) self.add(*lowerCAmelCase__ , *lowerCAmelCase__ ) __magic_name__ : Tuple = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) __magic_name__ : str = MarkupText( F'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=18 , ) key_text.move_to([-5, 2.4, 0] ) self.add(lowerCAmelCase__ , lowerCAmelCase__ ) __magic_name__ : Any = MarkupText( F'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint' , font_size=18 , ) blue_text.next_to(lowerCAmelCase__ , DOWN * 2.4 , aligned_edge=key_text.get_left() ) 
self.add(lowerCAmelCase__ ) __magic_name__ : Optional[Any] = MarkupText( F'Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.' , font_size=24 , ) step_a.move_to([2, 2, 0] ) __magic_name__ : int = [meta_mem.copy() for i in range(6 )] __magic_name__ : Union[str, Any] = [meta_mem.copy() for i in range(6 )] __magic_name__ : Any = VGroup(*lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 ) __magic_name__ : str = VGroup(*lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 ) __magic_name__ : Tuple = VGroup(lowerCAmelCase__ , lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 ) __magic_name__ : int = Text("""Disk""" , font_size=24 ) __magic_name__ : Union[str, Any] = Group(lowerCAmelCase__ , lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0.5 , aligned_edge=lowerCAmelCase__ ) disk.move_to([-4.0, -1.2_5, 0] ) self.play(Write(lowerCAmelCase__ , run_time=3 ) , Write(lowerCAmelCase__ , run_time=1 ) , Create(lowerCAmelCase__ , run_time=1 ) ) __magic_name__ : List[Any] = [] for i, rect in enumerate(lowerCAmelCase__ ): __magic_name__ : Dict = rect.copy() target.generate_target() target.target.move_to(disk_left_col_base[i] ).scale(0.5 ) animations.append(MoveToTarget(lowerCAmelCase__ , run_time=1.5 ) ) self.play(*lowerCAmelCase__ ) self.play(FadeOut(lowerCAmelCase__ ) ) __magic_name__ : str = MarkupText(F'Then, the checkpoint is removed from memory\nthrough garbage collection.' , font_size=24 ) step_a.move_to([2, 2, 0] ) self.play(Write(lowerCAmelCase__ , run_time=3 ) ) self.play( FadeOut(lowerCAmelCase__ , lowerCAmelCase__ , *lowerCAmelCase__ , *lowerCAmelCase__ ) , ) self.wait()
138
1
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

BIG_BIRD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json",
    "google/bigbird-roberta-large": "https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json",
    "google/bigbird-base-trivia-itc": "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json",
    # See all BigBird models at https://huggingface.co/models?filter=big_bird
}


class BigBirdConfig(PretrainedConfig):
    """Configuration for a BigBird model.

    NOTE(review): the obfuscated original declared *two* classes both named
    ``SCREAMING_SNAKE_CASE`` with an undefined base ``_a`` — the second
    shadowed the first, making the config class unreachable. Canonical names
    and bases (``PretrainedConfig`` / ``OnnxConfig``) are restored.
    """

    model_type = "big_bird"

    def __init__(
        self,
        vocab_size=50358,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=4096,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        sep_token_id=66,
        attention_type="block_sparse",
        use_bias=True,
        rescale_embeddings=False,
        block_size=64,
        num_random_blocks=3,
        classifier_dropout=None,
        **kwargs,
    ):
        """Store all hyper-parameters; special-token ids go to the base class."""
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            sep_token_id=sep_token_id,
            **kwargs,
        )

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache

        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
        self.classifier_dropout = classifier_dropout


class BigBirdOnnxConfig(OnnxConfig):
    """ONNX export configuration for BigBird."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis layout of the exported graph inputs."""
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ]
        )
28
'''simple docstring'''
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List

import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger()


# NOTE(review): the obfuscated original defined both dataclasses as ``a_`` and
# both converters as ``lowerCamelCase`` while calling ``Tracker``,
# ``ModuleTransfer`` and ``convert_weights_and_push`` — the script could not
# run. Canonical names are restored; ``nn.Convad``/``nn.BatchNormad`` (digits
# stripped by the obfuscation) are restored to ``nn.Conv2d``/``nn.BatchNorm2d``.
@dataclass
class Tracker:
    """Records leaf modules of ``module`` in execution order via forward hooks."""

    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        # A module with no submodules (or a conv/batch-norm) counts as a leaf.
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        # Detach all hooks once the trace pass is done.
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))


@dataclass
class ModuleTransfer:
    """Copies weights from ``src`` to ``dest`` by pairing traced leaf modules."""

    src: nn.Module
    dest: nn.Module
    verbose: int = 0
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)

    def __call__(self, x: Tensor):
        """Trace both models on ``x`` and transfer matching state dicts.

        Raises ``Exception`` when the two traces have different lengths.
        """
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized

        src_traced = list(filter(lambda x: type(x) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda x: type(x) not in self.dest_skip, dest_traced))

        if len(dest_traced) != len(src_traced):
            raise Exception(
                f'Numbers of operations are different. Source module has {len(src_traced)} operations while'
                f' destination module has {len(dest_traced)}.'
            )

        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f'Transfered from={src_m} to={dest_m}')


def convert_weight_and_push(name: str, config: ResNetConfig, save_directory: Path, push_to_hub: bool = True):
    """Convert a single timm ``name`` checkpoint and optionally push it to the Hub."""
    print(f'Converting {name}...')
    with torch.no_grad():
        from_model = timm.create_model(name, pretrained=True).eval()
        our_model = ResNetForImageClassification(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

    assert torch.allclose(from_model(x), our_model(x).logits), "The model logits don't match the original one."

    checkpoint_name = f'resnet{"-".join(name.split("resnet" ) )}'
    print(checkpoint_name)

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name,
            commit_message="""Add model""",
            use_temp_dir=True,
        )

        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("""facebook/convnext-base-224-22k-1k""")
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name,
            commit_message="""Add image processor""",
            use_temp_dir=True,
        )

        print(f'Pushed {checkpoint_name}')


def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    """Convert one (or, when ``model_name`` is None, every supported) ResNet."""
    filename = """imagenet-1k-id2label.json"""
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = """huggingface/label-files"""
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="""dataset"""), """r"""))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(ResNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_config = {
        """resnet18""": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[64, 128, 256, 512], layer_type="""basic"""
        ),
        """resnet26""": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[256, 512, 1024, 2048], layer_type="""bottleneck"""
        ),
        """resnet34""": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[64, 128, 256, 512], layer_type="""basic"""
        ),
        """resnet50""": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="""bottleneck"""
        ),
        """resnet101""": ImageNetPreTrainedConfig(
            depths=[3, 4, 23, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="""bottleneck"""
        ),
        """resnet152""": ImageNetPreTrainedConfig(
            depths=[3, 8, 36, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="""bottleneck"""
        ),
    }

    if model_name:
        convert_weight_and_push(model_name, names_to_config[model_name], save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name, config, save_directory, push_to_hub)
    return config, expected_shape


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--model_name""",
        default=None,
        type=str,
        help=(
            """The name of the model you wish to convert, it must be one of the supported resnet* architecture,"""
            """ currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted."""
        ),
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""",
        default=None,
        type=Path,
        required=True,
        help="""Path to the output PyTorch model directory.""",
    )
    parser.add_argument(
        """--push_to_hub""",
        default=True,
        type=bool,
        required=False,
        help="""If True, push model and image processor to the hub.""",
    )

    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
58
0
"""simple docstring""" import os import sys import unittest UpperCamelCase_ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, 'utils')) import get_test_info # noqa: E402 from get_test_info import ( # noqa: E402 get_model_to_test_mapping, get_model_to_tester_mapping, get_test_to_tester_mapping, ) UpperCamelCase_ = os.path.join('tests', 'models', 'bert', 'test_modeling_bert.py') UpperCamelCase_ = os.path.join('tests', 'models', 'blip', 'test_modeling_blip.py') class snake_case ( unittest.TestCase ): def UpperCAmelCase__ ( self) ->Dict: a_ = get_test_to_tester_mapping(_UpperCAmelCase) a_ = get_test_to_tester_mapping(_UpperCAmelCase) a_ = {'''BertModelTest''': '''BertModelTester'''} a_ = { '''BlipModelTest''': '''BlipModelTester''', '''BlipTextImageModelTest''': '''BlipTextImageModelsModelTester''', '''BlipTextModelTest''': '''BlipTextModelTester''', '''BlipTextRetrievalModelTest''': '''BlipTextRetrievalModelTester''', '''BlipVQAModelTest''': '''BlipVQAModelTester''', '''BlipVisionModelTest''': '''BlipVisionModelTester''', } self.assertEqual(get_test_info.to_json(_UpperCAmelCase) , _UpperCAmelCase) self.assertEqual(get_test_info.to_json(_UpperCAmelCase) , _UpperCAmelCase) def UpperCAmelCase__ ( self) ->Optional[int]: a_ = get_model_to_test_mapping(_UpperCAmelCase) a_ = get_model_to_test_mapping(_UpperCAmelCase) a_ = { '''BertForMaskedLM''': ['''BertModelTest'''], '''BertForMultipleChoice''': ['''BertModelTest'''], '''BertForNextSentencePrediction''': ['''BertModelTest'''], '''BertForPreTraining''': ['''BertModelTest'''], '''BertForQuestionAnswering''': ['''BertModelTest'''], '''BertForSequenceClassification''': ['''BertModelTest'''], '''BertForTokenClassification''': ['''BertModelTest'''], '''BertLMHeadModel''': ['''BertModelTest'''], '''BertModel''': ['''BertModelTest'''], } a_ = { '''BlipForConditionalGeneration''': ['''BlipTextImageModelTest'''], '''BlipForImageTextRetrieval''': 
['''BlipTextRetrievalModelTest'''], '''BlipForQuestionAnswering''': ['''BlipVQAModelTest'''], '''BlipModel''': ['''BlipModelTest'''], '''BlipTextModel''': ['''BlipTextModelTest'''], '''BlipVisionModel''': ['''BlipVisionModelTest'''], } self.assertEqual(get_test_info.to_json(_UpperCAmelCase) , _UpperCAmelCase) self.assertEqual(get_test_info.to_json(_UpperCAmelCase) , _UpperCAmelCase) def UpperCAmelCase__ ( self) ->Optional[Any]: a_ = get_model_to_tester_mapping(_UpperCAmelCase) a_ = get_model_to_tester_mapping(_UpperCAmelCase) a_ = { '''BertForMaskedLM''': ['''BertModelTester'''], '''BertForMultipleChoice''': ['''BertModelTester'''], '''BertForNextSentencePrediction''': ['''BertModelTester'''], '''BertForPreTraining''': ['''BertModelTester'''], '''BertForQuestionAnswering''': ['''BertModelTester'''], '''BertForSequenceClassification''': ['''BertModelTester'''], '''BertForTokenClassification''': ['''BertModelTester'''], '''BertLMHeadModel''': ['''BertModelTester'''], '''BertModel''': ['''BertModelTester'''], } a_ = { '''BlipForConditionalGeneration''': ['''BlipTextImageModelsModelTester'''], '''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTester'''], '''BlipForQuestionAnswering''': ['''BlipVQAModelTester'''], '''BlipModel''': ['''BlipModelTester'''], '''BlipTextModel''': ['''BlipTextModelTester'''], '''BlipVisionModel''': ['''BlipVisionModelTester'''], } self.assertEqual(get_test_info.to_json(_UpperCAmelCase) , _UpperCAmelCase) self.assertEqual(get_test_info.to_json(_UpperCAmelCase) , _UpperCAmelCase)
352
"""simple docstring""" import warnings from ...utils import logging from .image_processing_yolos import YolosImageProcessor UpperCamelCase_ = logging.get_logger(__name__) class snake_case ( SCREAMING_SNAKE_CASE_ ): def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase) ->None: warnings.warn( "The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please" " use YolosImageProcessor instead." , __UpperCAmelCase , ) super().__init__(*__UpperCAmelCase , **__UpperCAmelCase)
303
0
"""simple docstring""" from math import ceil, sqrt def UpperCamelCase__ ( lowercase__ : int = 100_0000 ): snake_case : Optional[Any] = 0 for outer_width in range(3 , (limit // 4) + 2 ): if outer_width**2 > limit: snake_case : Optional[int] = max(ceil(sqrt(outer_width**2 - limit ) ) , 1 ) else: snake_case : Optional[int] = 1 if (outer_width - hole_width_lower_bound) % 2: hole_width_lower_bound += 1 answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1 return answer if __name__ == "__main__": print(f'{solution() = }')
148
'''simple docstring'''
from math import sqrt

import numpy as np
from sympy import symbols

# NOTE(review): the obfuscated original assigned the speed of light, the
# symbols and all four functions to junk names (``_A``/``UpperCamelCase_``)
# while the bodies and the ``__main__`` block referenced ``c``, ``ct``/``x``/
# ``y``/``z``, ``beta``, ``gamma``, ``transformation_matrix``, ``transform``,
# ``four_vector`` and ``sub_dict`` — the canonical names are restored.

# Coefficient
# Speed of light (m/s)
c = 299792458

# Symbols
ct, x, y, z = symbols('''ct x y z''')


def beta(velocity: float) -> float:
    """Return v/c for a velocity in m/s.

    Raises ValueError when the speed exceeds c or is below 1 m/s.
    """
    if velocity > c:
        raise ValueError("""Speed must not exceed light speed 299,792,458 [m/s]!""")
    elif velocity < 1:
        # Usually the speed should be much higher than 1 (c order of magnitude)
        raise ValueError("""Speed must be greater than or equal to 1!""")
    return velocity / c


def gamma(velocity: float) -> float:
    """Return the Lorentz factor 1 / sqrt(1 - (v/c)^2)."""
    return 1 / sqrt(1 - beta(velocity) ** 2)


def transformation_matrix(velocity: float) -> np.ndarray:
    """Return the 4x4 Lorentz boost matrix along the x axis."""
    return np.array(
        [
            [gamma(velocity), -gamma(velocity) * beta(velocity), 0, 0],
            [-gamma(velocity) * beta(velocity), gamma(velocity), 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ]
    )


def transform(velocity: float, event: np.ndarray | None = None) -> np.ndarray:
    """Boost ``event`` (a four-vector ``[ct, x, y, z]``) by ``velocity``.

    When ``event`` is None a symbolic four-vector is used; otherwise the time
    component is assumed to be given in seconds and is scaled by c in place.
    """
    if event is None:
        event = np.array([ct, x, y, z])  # Symbolic four vector
    else:
        event[0] *= c  # x0 is ct (speed of light * time)

    return transformation_matrix(velocity) @ event


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Example of symbolic vector:
    four_vector = transform(29979245)
    print('''Example of four vector: ''')
    print(f'ct\' = {four_vector[0]}')
    print(f'x\' = {four_vector[1]}')
    print(f'y\' = {four_vector[2]}')
    print(f'z\' = {four_vector[3]}')

    # Substitute symbols with numerical values
    sub_dict = {ct: c, x: 1, y: 1, z: 1}
    numerical_vector = [four_vector[i].subs(sub_dict) for i in range(4)]
    print(f'\n{numerical_vector}')
229
0
import unittest from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin a_ = get_tests_dir('fixtures/spiece.model') @require_sentencepiece @require_tokenizers class _lowercase ( snake_case_ , unittest.TestCase ): lowercase = DebertaVaTokenizer lowercase = DebertaVaTokenizerFast lowercase = True lowercase = True def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> int: """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing UpperCamelCase_ : List[str] = DebertaVaTokenizer(snake_case , unk_token='<unk>' ) tokenizer.save_pretrained(self.tmpdirname ) def SCREAMING_SNAKE_CASE__ ( self : List[str] , snake_case : Optional[Any] ) -> int: """simple docstring""" UpperCamelCase_ : Union[str, Any] = 'this is a test' UpperCamelCase_ : Union[str, Any] = 'this is a test' return input_text, output_text def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Optional[Any]: """simple docstring""" UpperCamelCase_ : Any = '<pad>' UpperCamelCase_ : Tuple = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case ) , snake_case ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case ) , snake_case ) def SCREAMING_SNAKE_CASE__ ( self : int ) -> Tuple: """simple docstring""" UpperCamelCase_ : int = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '<pad>' ) self.assertEqual(vocab_keys[1] , '<unk>' ) self.assertEqual(vocab_keys[-1] , '[PAD]' ) self.assertEqual(len(snake_case ) , 3_0_0_0_1 ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> List[str]: """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 3_0_0_0_0 ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Optional[Any]: """simple docstring""" UpperCamelCase_ : int = ' \tHeLLo!how \n Are yoU? 
' UpperCamelCase_ : Optional[Any] = ['▁hello', '!', 'how', '▁are', '▁you', '?'] # fmt: on UpperCamelCase_ : List[Any] = DebertaVaTokenizer(snake_case , do_lower_case=snake_case ) UpperCamelCase_ : Union[str, Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(snake_case , add_special_tokens=snake_case ) ) self.assertListEqual(snake_case , snake_case ) UpperCamelCase_ : int = DebertaVaTokenizerFast(snake_case , do_lower_case=snake_case ) UpperCamelCase_ : Any = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(snake_case , add_special_tokens=snake_case ) ) self.assertListEqual(snake_case , snake_case ) @unittest.skip('There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.' ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> int: """simple docstring""" pass @unittest.skip('There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.' ) def SCREAMING_SNAKE_CASE__ ( self : int ) -> Optional[Any]: """simple docstring""" pass def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> List[str]: """simple docstring""" UpperCamelCase_ : Any = 'I was born in 92000, and this is falsé.' 
UpperCamelCase_ : Dict = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ] # fmt: on UpperCamelCase_ : Optional[Any] = DebertaVaTokenizer(snake_case , split_by_punct=snake_case ) UpperCamelCase_ : Optional[int] = tokenizer.convert_ids_to_tokens(tokenizer.encode(snake_case , add_special_tokens=snake_case ) ) self.assertListEqual(snake_case , snake_case ) UpperCamelCase_ : Tuple = DebertaVaTokenizerFast(snake_case , split_by_punct=snake_case ) UpperCamelCase_ : Optional[int] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(snake_case , add_special_tokens=snake_case ) ) self.assertListEqual(snake_case , snake_case ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Tuple: """simple docstring""" UpperCamelCase_ : str = 'I was born in 92000, and this is falsé.' UpperCamelCase_ : int = ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ] # fmt: on UpperCamelCase_ : Optional[int] = DebertaVaTokenizer(snake_case , do_lower_case=snake_case , split_by_punct=snake_case ) UpperCamelCase_ : int = tokenizer.convert_ids_to_tokens(tokenizer.encode(snake_case , add_special_tokens=snake_case ) ) self.assertListEqual(snake_case , snake_case ) UpperCamelCase_ : str = DebertaVaTokenizerFast(snake_case , do_lower_case=snake_case , split_by_punct=snake_case ) UpperCamelCase_ : int = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(snake_case , add_special_tokens=snake_case ) ) self.assertListEqual(snake_case , snake_case ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> str: """simple docstring""" UpperCamelCase_ : Dict = 'I was born in 92000, and this is falsé.' 
UpperCamelCase_ : int = ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.', ] # fmt: on UpperCamelCase_ : Dict = DebertaVaTokenizer(snake_case , do_lower_case=snake_case , split_by_punct=snake_case ) UpperCamelCase_ : List[str] = tokenizer.convert_ids_to_tokens(tokenizer.encode(snake_case , add_special_tokens=snake_case ) ) self.assertListEqual(snake_case , snake_case ) UpperCamelCase_ : int = DebertaVaTokenizerFast(snake_case , do_lower_case=snake_case , split_by_punct=snake_case ) UpperCamelCase_ : Dict = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(snake_case , add_special_tokens=snake_case ) ) self.assertListEqual(snake_case , snake_case ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Optional[int]: """simple docstring""" UpperCamelCase_ : List[Any] = 'I was born in 92000, and this is falsé.' UpperCamelCase_ : Optional[int] = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ] # fmt: on UpperCamelCase_ : str = DebertaVaTokenizer(snake_case , do_lower_case=snake_case , split_by_punct=snake_case ) UpperCamelCase_ : str = tokenizer.convert_ids_to_tokens(tokenizer.encode(snake_case , add_special_tokens=snake_case ) ) self.assertListEqual(snake_case , snake_case ) UpperCamelCase_ : Union[str, Any] = DebertaVaTokenizerFast(snake_case , do_lower_case=snake_case , split_by_punct=snake_case ) UpperCamelCase_ : List[Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(snake_case , add_special_tokens=snake_case ) ) self.assertListEqual(snake_case , snake_case ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Optional[int]: """simple docstring""" UpperCamelCase_ : str = ' \tHeLLo!how \n Are yoU? 
' UpperCamelCase_ : Union[str, Any] = ['▁', '<unk>', 'e', '<unk>', 'o', '!', 'how', '▁', '<unk>', 're', '▁yo', '<unk>', '?'] # fmt: on UpperCamelCase_ : Optional[Any] = DebertaVaTokenizer(snake_case , do_lower_case=snake_case , split_by_punct=snake_case ) UpperCamelCase_ : Union[str, Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(snake_case , add_special_tokens=snake_case ) ) self.assertListEqual(snake_case , snake_case ) UpperCamelCase_ : List[str] = DebertaVaTokenizerFast(snake_case , do_lower_case=snake_case , split_by_punct=snake_case ) UpperCamelCase_ : int = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(snake_case , add_special_tokens=snake_case ) ) self.assertListEqual(snake_case , snake_case ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Optional[Any]: """simple docstring""" UpperCamelCase_ : List[str] = self.get_tokenizer() UpperCamelCase_ : Any = self.get_rust_tokenizer() UpperCamelCase_ : str = 'I was born in 92000, and this is falsé.' UpperCamelCase_ : List[str] = tokenizer.convert_ids_to_tokens(tokenizer.encode(snake_case , add_special_tokens=snake_case ) ) UpperCamelCase_ : Tuple = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(snake_case , add_special_tokens=snake_case ) ) self.assertListEqual(snake_case , snake_case ) UpperCamelCase_ : str = tokenizer.encode(snake_case , add_special_tokens=snake_case ) UpperCamelCase_ : Dict = rust_tokenizer.encode(snake_case , add_special_tokens=snake_case ) self.assertListEqual(snake_case , snake_case ) UpperCamelCase_ : int = self.get_rust_tokenizer() UpperCamelCase_ : Tuple = tokenizer.encode(snake_case ) UpperCamelCase_ : Tuple = rust_tokenizer.encode(snake_case ) self.assertListEqual(snake_case , snake_case ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> List[Any]: """simple docstring""" UpperCamelCase_ : Union[str, Any] = 'This is a test' UpperCamelCase_ : Tuple = [1_3, 1, 4_3_9_8, 2_5, 2_1, 1_2_8_9] UpperCamelCase_ : str = ['▁', 'T', 'his', '▁is', '▁a', 
'▁test'] UpperCamelCase_ : str = ['▁', '<unk>', 'his', '▁is', '▁a', '▁test'] UpperCamelCase_ : Optional[int] = DebertaVaTokenizer(snake_case , keep_accents=snake_case ) UpperCamelCase_ : List[Any] = DebertaVaTokenizerFast(snake_case , keep_accents=snake_case ) UpperCamelCase_ : Dict = tokenizer.encode(snake_case , add_special_tokens=snake_case ) self.assertListEqual(snake_case , snake_case ) UpperCamelCase_ : int = tokenizer.tokenize(snake_case ) self.assertListEqual(snake_case , snake_case ) UpperCamelCase_ : Optional[Any] = tokenizer.convert_ids_to_tokens(snake_case ) self.assertListEqual(snake_case , snake_case ) UpperCamelCase_ : int = rust_tokenizer.encode(snake_case , add_special_tokens=snake_case ) self.assertListEqual(snake_case , snake_case ) UpperCamelCase_ : Any = rust_tokenizer.tokenize(snake_case ) self.assertListEqual(snake_case , snake_case ) UpperCamelCase_ : Tuple = rust_tokenizer.convert_ids_to_tokens(snake_case ) self.assertListEqual(snake_case , snake_case ) # fmt: off UpperCamelCase_ : str = 'I was born in 92000, and this is falsé.' 
UpperCamelCase_ : Tuple = [1_3, 1, 2_3, 3_8_6, 1_9, 5_6_1, 3_0_5_0, 1_5, 1_7, 4_8, 2_5, 8_2_5_6, 1_8, 1, 9] UpperCamelCase_ : Union[str, Any] = ['▁', 'I', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', 'é', '.', ] UpperCamelCase_ : int = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.', ] # fmt: on UpperCamelCase_ : Union[str, Any] = tokenizer.encode(snake_case , add_special_tokens=snake_case ) self.assertListEqual(snake_case , snake_case ) UpperCamelCase_ : Optional[int] = tokenizer.tokenize(snake_case ) self.assertListEqual(snake_case , snake_case ) UpperCamelCase_ : int = tokenizer.convert_ids_to_tokens(snake_case ) self.assertListEqual(snake_case , snake_case ) UpperCamelCase_ : Optional[Any] = rust_tokenizer.encode(snake_case , add_special_tokens=snake_case ) self.assertListEqual(snake_case , snake_case ) UpperCamelCase_ : Tuple = rust_tokenizer.tokenize(snake_case ) self.assertListEqual(snake_case , snake_case ) UpperCamelCase_ : str = rust_tokenizer.convert_ids_to_tokens(snake_case ) self.assertListEqual(snake_case , snake_case ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Optional[int]: """simple docstring""" UpperCamelCase_ : Any = DebertaVaTokenizer(snake_case ) UpperCamelCase_ : Union[str, Any] = tokenizer.encode('sequence builders' ) UpperCamelCase_ : int = tokenizer.encode('multi-sequence build' ) UpperCamelCase_ : int = tokenizer.build_inputs_with_special_tokens(snake_case ) UpperCamelCase_ : Any = tokenizer.build_inputs_with_special_tokens(snake_case , snake_case ) self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , snake_case ) self.assertEqual( [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , snake_case , ) @slow def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Optional[Any]: """simple docstring""" UpperCamelCase_ : Dict = {'input_ids': [[1, 3_9_8_6_7, 3_6, 
1_9_3_9_0, 4_8_6, 2_7, 3_5_0_5_2, 8_1_4_3_6, 1_8, 6_0_6_8_5, 1_2_2_5, 7, 3_5_0_5_2, 8_1_4_3_6, 1_8, 9_3_6_7, 1_6_8_9_9, 1_8, 1_5_9_3_7, 5_3, 5_9_4, 7_7_3, 1_8, 1_6_2_8_7, 3_0_4_6_5, 3_6, 1_5_9_3_7, 6, 4_1_1_3_9, 3_8, 3_6_9_7_9, 6_0_7_6_3, 1_9_1, 6, 3_4_1_3_2, 9_9, 6, 5_0_5_3_8, 3_9_0, 4_3_2_3_0, 6, 3_4_1_3_2, 2_7_7_9, 2_0_8_5_0, 1_4, 6_9_9, 1_0_7_2, 1_1_9_4, 3_6, 3_8_2, 1_0_9_0_1, 5_3, 7, 6_9_9, 1_0_7_2, 2_0_8_4, 3_6, 2_0_4_2_2, 6_3_0, 5_3, 1_9, 1_0_5, 3_0_4_9, 1_8_9_6, 1_0_5_3, 1_6_8_9_9, 1_5_0_6, 1_1, 3_7_9_7_8, 4_2_4_3, 7, 1_2_3_7, 3_1_8_6_9, 2_0_0, 1_6_5_6_6, 6_5_4, 6, 3_5_0_5_2, 8_1_4_3_6, 7, 5_5_6_3_0, 1_3_5_9_3, 4, 2], [1, 2_6, 1_5_0_1_1, 1_3, 6_6_7, 8, 1_0_5_3, 1_8, 2_3_6_1_1, 1_2_3_7, 7_2_3_5_6, 1_2_8_2_0, 3_4, 1_0_4_1_3_4, 1_2_0_9, 3_5, 1_3_3_1_3, 6_6_2_7, 2_1, 2_0_2, 3_4_7, 7, 1_6_4, 2_3_9_9, 1_1, 4_6, 4_4_8_5, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1_2_3_2, 2_8_6_4, 1_5_7_8_5, 1_4_9_5_1, 1_0_5, 5, 8_5_8_1, 1_2_5_0, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=snake_case , model_name='microsoft/deberta-v2-xlarge' , revision='ad6e42c1532ddf3a15c39246b63f5559d558b670' , )
50
from timeit import timeit

test_data = {
    "MALAYALAM": True,
    "String": False,
    "rotor": True,
    "level": True,
    "A": True,
    "BB": True,
    "ABC": False,
    "amanaplanacanalpanama": True,  # "a man a plan a canal panama"
}
# Ensure our test data is valid
assert all((key == key[::-1]) is value for key, value in test_data.items())


def is_palindrome(s: str) -> bool:
    """Return True if *s* reads the same forwards and backwards (two-pointer scan)."""
    start_i = 0
    end_i = len(s) - 1
    while start_i < end_i:
        if s[start_i] == s[end_i]:
            start_i += 1
            end_i -= 1
        else:
            return False
    return True


def is_palindrome_traversal(s: str) -> bool:
    """Return True if *s* is a palindrome, comparing each char with its mirror.

    We only need to traverse up to half of the string, as the i'th character
    from the start is compared with the i'th character from the end
    (i == n - i - 1 at the middle), where n is the length of the string.
    """
    half = len(s) // 2
    n = len(s)
    return all(s[i] == s[n - i - 1] for i in range(half))


def is_palindrome_recursive(s: str) -> bool:
    """Return True if *s* is a palindrome, peeling one character off each end.

    Base case is len(s) <= 1: the empty string and a single character are
    palindromes (len <= 2 would wrongly accept strings like "ab").
    """
    if len(s) <= 1:
        return True
    if s[0] == s[len(s) - 1]:
        return is_palindrome_recursive(s[1:-1])
    else:
        return False


def is_palindrome_slice(s: str) -> bool:
    """Return True if *s* equals its own reverse (slice idiom)."""
    return s == s[::-1]


def benchmark_function(name: str) -> None:
    """Time the checker called *name* over the whole test_data set and print the result."""
    stmt = f"all({name}(key) is value for key, value in test_data.items())"
    setup = f"from __main__ import test_data, {name}"
    number = 500000
    result = timeit(stmt=stmt, setup=setup, number=number)
    print(f"{name:<35} finished {number:,} runs in {result:.5f} seconds")


if __name__ == "__main__":
    for key, value in test_data.items():
        assert is_palindrome(key) is is_palindrome_recursive(key)
        assert is_palindrome(key) is is_palindrome_slice(key)
        print(f"{key:21} {value}")
    print("a man a plan a canal panama")

    # finished 500,000 runs in 0.46793 seconds
    benchmark_function("is_palindrome_slice")
    # finished 500,000 runs in 0.85234 seconds
    benchmark_function("is_palindrome")
    # finished 500,000 runs in 1.32028 seconds
    benchmark_function("is_palindrome_recursive")
    # finished 500,000 runs in 2.08679 seconds
    benchmark_function("is_palindrome_traversal")
50
1
import json
import os
from pathlib import Path

import pytest

from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename


URL = "http://www.mocksite.com/file1.txt"
CONTENT = '"text": ["foo", "foo"]'
HASH = "6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"


class MockResponse:
    """Minimal stand-in for a requests.Response that streams CONTENT."""

    status_code = 200
    headers = {"Content-Length": "100"}
    cookies = {}

    def iter_content(self, **kwargs):
        # Single chunk containing the whole payload.
        return [bytes(CONTENT, "utf-8")]


def mock_request(*args, **kwargs):
    """Replacement for requests.request that always returns MockResponse."""
    return MockResponse()


@pytest.mark.parametrize("urls_type", [str, list, dict])
def test_download_manager_download(urls_type, tmp_path, monkeypatch):
    """Download a mocked URL given as a str, a list or a dict and check the cache layout."""
    import requests

    monkeypatch.setattr(requests, "request", mock_request)

    url = URL
    if issubclass(urls_type, str):
        urls = url
    elif issubclass(urls_type, list):
        urls = [url]
    elif issubclass(urls_type, dict):
        urls = {"train": url}
    dataset_name = "dummy"
    cache_subdir = "downloads"
    cache_dir_root = tmp_path
    download_config = DownloadConfig(
        cache_dir=os.path.join(cache_dir_root, cache_subdir),
        use_etag=False,
    )
    dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
    downloaded_paths = dl_manager.download(urls)
    input_urls = urls
    # Normalize the three container shapes to parallel lists of paths and urls.
    for downloaded_paths in [downloaded_paths]:
        if isinstance(urls, str):
            downloaded_paths = [downloaded_paths]
            input_urls = [urls]
        elif isinstance(urls, dict):
            assert "train" in downloaded_paths.keys()
            downloaded_paths = downloaded_paths.values()
            input_urls = urls.values()
        assert downloaded_paths
        for downloaded_path, input_url in zip(downloaded_paths, input_urls):
            assert downloaded_path == dl_manager.downloaded_paths[input_url]
            downloaded_path = Path(downloaded_path)
            parts = downloaded_path.parts
            assert parts[-1] == HASH
            assert parts[-2] == cache_subdir
            assert downloaded_path.exists()
            content = downloaded_path.read_text()
            assert content == CONTENT
            metadata_downloaded_path = downloaded_path.with_suffix(".json")
            assert metadata_downloaded_path.exists()
            metadata_content = json.loads(metadata_downloaded_path.read_text())
            assert metadata_content == {"url": URL, "etag": None}


@pytest.mark.parametrize("paths_type", [str, list, dict])
def test_download_manager_extract(paths_type, xz_file, text_file):
    """Extract an xz fixture given as a str, a list or a dict and check the cache layout."""
    filename = str(xz_file)
    if issubclass(paths_type, str):
        paths = filename
    elif issubclass(paths_type, list):
        paths = [filename]
    elif issubclass(paths_type, dict):
        paths = {"train": filename}
    dataset_name = "dummy"
    cache_dir = xz_file.parent
    extracted_subdir = "extracted"
    download_config = DownloadConfig(
        cache_dir=cache_dir,
        use_etag=False,
    )
    dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
    extracted_paths = dl_manager.extract(paths)
    input_paths = paths
    for extracted_paths in [extracted_paths]:
        if isinstance(paths, str):
            extracted_paths = [extracted_paths]
            input_paths = [paths]
        elif isinstance(paths, dict):
            assert "train" in extracted_paths.keys()
            extracted_paths = extracted_paths.values()
            input_paths = paths.values()
        assert extracted_paths
        for extracted_path, input_path in zip(extracted_paths, input_paths):
            assert extracted_path == dl_manager.extracted_paths[input_path]
            extracted_path = Path(extracted_path)
            parts = extracted_path.parts
            assert parts[-1] == hash_url_to_filename(input_path, etag=None)
            assert parts[-2] == extracted_subdir
            assert extracted_path.exists()
            extracted_file_content = extracted_path.read_text()
            expected_file_content = text_file.read_text()
            assert extracted_file_content == expected_file_content


def _test_jsonl(path, file):
    """Check that *file* (an open binary file from an archive) holds the expected 4 jsonl rows."""
    assert path.endswith(".jsonl")
    for num_items, line in enumerate(file, start=1):
        item = json.loads(line.decode("utf-8"))
        assert item.keys() == {"col_1", "col_2", "col_3"}
    assert num_items == 4


@pytest.mark.parametrize("archive_jsonl", ["tar_jsonl_path", "zip_jsonl_path"])
def test_iter_archive_path(archive_jsonl, request):
    archive_jsonl_path = request.getfixturevalue(archive_jsonl)
    dl_manager = DownloadManager()
    for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(archive_jsonl_path), start=1):
        _test_jsonl(path, file)
    assert num_jsonl == 2


@pytest.mark.parametrize("archive_nested_jsonl", ["tar_nested_jsonl_path", "zip_nested_jsonl_path"])
def test_iter_archive_file(archive_nested_jsonl, request):
    archive_nested_jsonl_path = request.getfixturevalue(archive_nested_jsonl)
    dl_manager = DownloadManager()
    for num_tar, (path, file) in enumerate(dl_manager.iter_archive(archive_nested_jsonl_path), start=1):
        for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(file), start=1):
            _test_jsonl(subpath, subfile)
    assert num_tar == 1
    assert num_jsonl == 2


def test_iter_files(data_dir_with_hidden_files):
    dl_manager = DownloadManager()
    for num_file, file in enumerate(dl_manager.iter_files(data_dir_with_hidden_files), start=1):
        assert os.path.basename(file) == ("test.txt" if num_file == 1 else "train.txt")
    assert num_file == 2
2
import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image

from diffusers import (
    DDIMScheduler,
    KandinskyV22ControlnetImg2ImgPipeline,
    KandinskyV22PriorEmb2EmbPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


enable_full_determinism()


class KandinskyV22ControlnetImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """Fast CPU tests with dummy-sized models for the Kandinsky 2.2 controlnet img2img pipeline."""

    pipeline_class = KandinskyV22ControlnetImg2ImgPipeline
    params = ["image_embeds", "negative_image_embeds", "image", "hint"]
    batch_params = ["image_embeds", "negative_image_embeds", "image", "hint"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 8,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image_hint",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 32, 64, 64],
            "down_block_types": [
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "AttnDownEncoderBlock2D",
            ],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }

        scheduler = DDIMScheduler(**ddim_config)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create hint
        hint = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "hint": hint,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_controlnet_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.54985034, 0.55509365, 0.52561504, 0.5570494, 0.5593818, 0.5263979, 0.50285643, 0.5069846, 0.51196736]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"


@slow
@require_torch_gpu
class KandinskyV22ControlnetImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_controlnet_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/cat.png"
        )
        init_image = init_image.resize((512, 512))

        hint = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/hint_image_cat.png"
        )
        hint = torch.from_numpy(np.array(hint)).float() / 255.0
        hint = hint.permute(2, 0, 1).unsqueeze(0)

        prompt = "A robot, 4k photo"

        pipe_prior = KandinskyV22PriorEmb2EmbPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22ControlnetImg2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)

        image_emb, zero_image_emb = pipe_prior(
            prompt,
            image=init_image,
            strength=0.85,
            generator=generator,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            hint=hint,
            generator=generator,
            num_inference_steps=100,
            height=512,
            width=512,
            strength=0.5,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (512, 512, 3)

        assert_mean_pixel_difference(image, expected_image)
313
0
import copy
import re


class TrialShortNamer:
    """Build short, reversible names for hyper-parameter trials.

    Each parameter name is abbreviated to the shortest unambiguous prefix of
    each of its words; a trial is then rendered as ``<prefix>_<key><value>...``
    and can be parsed back into a full parameter dict with `parse_repr`.
    """

    PREFIX = "hp"
    DEFAULTS = {}
    NAMING_INFO = None  # lazily built lookup tables (see build_naming_info)

    @classmethod
    def set_defaults(cls, prefix, defaults):
        """Set the naming prefix and the default parameter values, then build the tables."""
        cls.PREFIX = prefix
        cls.DEFAULTS = defaults
        cls.build_naming_info()

    @staticmethod
    def shortname_for_word(info, word):
        """Return (and register) a short, collision-free abbreviation for *word*."""
        if len(word) == 0:
            return ""
        short_word = None
        if any(char.isdigit() for char in word):
            raise Exception(f"Parameters should not contain numbers: '{word}' contains a number")
        if word in info["short_word"]:
            return info["short_word"][word]
        # Try the shortest prefix of the word that is not already taken.
        for prefix_len in range(1, len(word) + 1):
            prefix = word[:prefix_len]
            if prefix in info["reverse_short_word"]:
                continue
            else:
                short_word = prefix
                break

        if short_word is None:
            # Paranoid fallback: every prefix is taken, disambiguate with "#<counter>".

            def int_to_alphabetic(integer):
                s = ""
                while integer != 0:
                    s = chr(ord("A") + integer % 10) + s
                    integer //= 10
                return s

            i = 0
            while True:
                sword = word + "#" + int_to_alphabetic(i)
                if sword in info["reverse_short_word"]:
                    i += 1  # advance the counter, otherwise this loop never terminates
                    continue
                else:
                    short_word = sword
                    break

        info["short_word"][word] = short_word
        info["reverse_short_word"][short_word] = word
        return short_word

    @staticmethod
    def shortname_for_key(info, param_name):
        """Return (and register) the short name for a full parameter name like ``learning_rate``."""
        words = param_name.split("_")

        shortname_parts = [TrialShortNamer.shortname_for_word(info, word) for word in words]

        # We try to create a separatorless short name, but if there is a collision we have to fallback
        # to a separated short name
        separators = ["", "_"]

        for separator in separators:
            shortname = separator.join(shortname_parts)
            if shortname not in info["reverse_short_param"]:
                info["short_param"][param_name] = shortname
                info["reverse_short_param"][shortname] = param_name
                return shortname

        return param_name

    @staticmethod
    def add_new_param_name(info, param_name):
        """Register *param_name* in the lookup tables."""
        short_name = TrialShortNamer.shortname_for_key(info, param_name)
        info["short_param"][param_name] = short_name
        info["reverse_short_param"][short_name] = param_name

    @classmethod
    def build_naming_info(cls):
        """Build the bidirectional word/param lookup tables once (no-op afterwards)."""
        if cls.NAMING_INFO is not None:
            return
        info = {
            "short_word": {},
            "reverse_short_word": {},
            "short_param": {},
            "reverse_short_param": {},
        }
        field_keys = list(cls.DEFAULTS.keys())
        for k in field_keys:
            cls.add_new_param_name(info, k)
        cls.NAMING_INFO = info

    @classmethod
    def shortname(cls, params):
        """Render *params* as a short trial name; values equal to the default are omitted."""
        cls.build_naming_info()
        assert cls.PREFIX is not None
        name = [copy.copy(cls.PREFIX)]
        for k, v in params.items():
            if k not in cls.DEFAULTS:
                raise Exception(f"You should provide a default value for the param name {k} with value {v}")
            if v == cls.DEFAULTS[k]:
                # The default value is not added to the name
                continue
            key = cls.NAMING_INFO["short_param"][k]
            if isinstance(v, bool):
                v = 1 if v else 0
            sep = "" if isinstance(v, (int, float)) else "-"
            e = f"{key}{sep}{v}"
            name.append(e)
        return "_".join(name)

    @classmethod
    def parse_repr(cls, repr):
        """Parse a name produced by `shortname` back into a full parameter dict."""
        repr = repr[len(cls.PREFIX) + 1 :]
        if repr == "":
            values = []
        else:
            values = repr.split("_")

        parameters = {}
        for value in values:
            if "-" in value:
                p_k, p_v = value.split("-")
            else:
                # Numeric values are glued to the key: split digits/dots from letters.
                p_k = re.sub("[0-9.]", "", value)
                p_v = float(re.sub("[^0-9.]", "", value))
            key = cls.NAMING_INFO["reverse_short_param"][p_k]
            parameters[key] = p_v
        # Fill in everything that was omitted because it had the default value.
        for k in cls.DEFAULTS:
            if k not in parameters:
                parameters[k] = cls.DEFAULTS[k]
        return parameters
360
from __future__ import annotations import unittest from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available from transformers.testing_utils import require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel @require_tf class __a : _a : Dict = BlenderbotConfig _a : Dict = {} _a : Union[str, Any] = 'gelu' def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=99 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=37 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=20 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=1 , _SCREAMING_SNAKE_CASE=0 , ) -> Optional[int]: """simple docstring""" _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = seq_length _UpperCAmelCase = is_training _UpperCAmelCase = use_labels _UpperCAmelCase = vocab_size _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = max_position_embeddings _UpperCAmelCase = eos_token_id _UpperCAmelCase = pad_token_id _UpperCAmelCase = bos_token_id def UpperCAmelCase__ ( self ) -> Tuple: """simple docstring""" _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) _UpperCAmelCase = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) _UpperCAmelCase = tf.concat([input_ids, 
eos_tensor] , axis=1 ) _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _UpperCAmelCase = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) _UpperCAmelCase = prepare_blenderbot_inputs_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) return config, inputs_dict def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any: """simple docstring""" _UpperCAmelCase = TFBlenderbotModel(config=_SCREAMING_SNAKE_CASE ).get_decoder() _UpperCAmelCase = inputs_dict['input_ids'] _UpperCAmelCase = input_ids[:1, :] _UpperCAmelCase = inputs_dict['attention_mask'][:1, :] _UpperCAmelCase = inputs_dict['head_mask'] _UpperCAmelCase = 1 # first forward pass _UpperCAmelCase = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , head_mask=_SCREAMING_SNAKE_CASE , use_cache=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase , _UpperCAmelCase = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids _UpperCAmelCase = ids_tensor((self.batch_size, 3) , config.vocab_size ) _UpperCAmelCase = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and _UpperCAmelCase = tf.concat([input_ids, next_tokens] , axis=-1 ) _UpperCAmelCase = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) _UpperCAmelCase = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE )[0] 
_UpperCAmelCase = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , past_key_values=_SCREAMING_SNAKE_CASE )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice _UpperCAmelCase = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) _UpperCAmelCase = output_from_no_past[:, -3:, random_slice_idx] _UpperCAmelCase = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , rtol=1e-3 ) def lowerCAmelCase__ ( a__: Dict , a__: Dict , a__: Any , a__: Any=None , a__: List[Any]=None , a__: Union[str, Any]=None , a__: Tuple=None , a__: Union[str, Any]=None , ) -> Any: '''simple docstring''' if attention_mask is None: _UpperCAmelCase = tf.cast(tf.math.not_equal(a__ , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: _UpperCAmelCase = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: _UpperCAmelCase = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: _UpperCAmelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: _UpperCAmelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class __a ( UpperCAmelCase , UpperCAmelCase , unittest.TestCase ): _a : List[Any] = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else () _a : List[str] = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else () _a : List[str] = ( { 
'conversational': TFBlenderbotForConditionalGeneration, 'feature-extraction': TFBlenderbotModel, 'summarization': TFBlenderbotForConditionalGeneration, 'text2text-generation': TFBlenderbotForConditionalGeneration, 'translation': TFBlenderbotForConditionalGeneration, } if is_tf_available() else {} ) _a : Dict = True _a : int = False _a : Union[str, Any] = False def UpperCAmelCase__ ( self ) -> List[Any]: """simple docstring""" _UpperCAmelCase = TFBlenderbotModelTester(self ) _UpperCAmelCase = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self ) -> str: """simple docstring""" self.config_tester.run_common_tests() def UpperCAmelCase__ ( self ) -> List[str]: """simple docstring""" _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*_SCREAMING_SNAKE_CASE ) @require_tokenizers @require_tf class __a ( unittest.TestCase ): _a : int = ['My friends are cool but they eat too many carbs.'] _a : List[Any] = 'facebook/blenderbot-400M-distill' @cached_property def UpperCAmelCase__ ( self ) -> List[Any]: """simple docstring""" return BlenderbotTokenizer.from_pretrained(self.model_name ) @cached_property def UpperCAmelCase__ ( self ) -> Any: """simple docstring""" _UpperCAmelCase = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model @slow def UpperCAmelCase__ ( self ) -> str: """simple docstring""" _UpperCAmelCase = self.tokenizer(self.src_text , return_tensors='tf' ) _UpperCAmelCase = self.model.generate( model_inputs.input_ids , ) _UpperCAmelCase = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=_SCREAMING_SNAKE_CASE )[0] assert ( generated_words == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?" )
185
0
"""ViT model configuration."""

from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

VIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/vit-base-patch16-224": "https://huggingface.co/vit-base-patch16-224/resolve/main/config.json",
    # See all ViT models at https://huggingface.co/models?filter=vit
}


class ViTConfig(PretrainedConfig):
    """Configuration class storing the hyper-parameters of a ViT model.

    Defaults reproduce the google/vit-base-patch16-224 architecture.
    """

    model_type = "vit"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        encoder_stride=16,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride


class ViTOnnxConfig(OnnxConfig):
    """ONNX export configuration for ViT."""

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
243
def lowerCAmelCase__(arr: list) -> list:
    """Return all permutations of *arr* as tuples, using Heap's algorithm.

    Fixes from the obfuscated original: the inner ``generate`` declared two
    parameters with the same placeholder name (SyntaxError), and ``__main__``
    called an undefined ``heaps``.

    :param arr: sequence to permute (permuted in place, restored by symmetry)
    :return: list of ``len(arr)!`` tuples (``[()]`` for empty input)
    """
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(k: int, arr: list) -> None:
        # Heap's algorithm: permute the first k elements, swapping based on
        # the parity of k between recursive calls.
        if k == 1:
            res.append(tuple(arr[:]))
            return
        generate(k - 1, arr)
        for i in range(k - 1):
            if k % 2 == 0:  # k is even: swap i-th with last
                arr[i], arr[k - 1] = arr[k - 1], arr[i]
            else:  # k is odd: swap first with last
                arr[0], arr[k - 1] = arr[k - 1], arr[0]
            generate(k - 1, arr)

    generate(len(arr), arr)
    return res


# Descriptive alias; also what the CLI entry point below calls.
heaps = lowerCAmelCase__


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    print(heaps(arr))
212
0
"""simple docstring""" def lowercase_ ( __UpperCAmelCase ) -> bool: if num < 0: return False lowerCAmelCase__ : int = num lowerCAmelCase__ : int = 0 while num > 0: lowerCAmelCase__ : List[str] = rev_num * 10 + (num % 10) num //= 10 return num_copy == rev_num if __name__ == "__main__": import doctest doctest.testmod()
359
"""simple docstring""" import argparse from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta from transformers.utils import logging logging.set_verbosity_info() def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[Any]: # Initialise PyTorch model lowerCAmelCase__ : int = TaConfig.from_json_file(__UpperCAmelCase ) print(f"""Building PyTorch model from configuration: {config}""" ) lowerCAmelCase__ : Optional[int] = TaForConditionalGeneration(__UpperCAmelCase ) # Load weights from tf checkpoint load_tf_weights_in_ta(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) # Save pytorch-model print(f"""Save PyTorch model to {pytorch_dump_path}""" ) model.save_pretrained(__UpperCAmelCase ) if __name__ == "__main__": _A = argparse.ArgumentParser() # Required parameters parser.add_argument( """--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path.""" ) parser.add_argument( """--config_file""", default=None, type=str, required=True, help=( """The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture.""" ), ) parser.add_argument( """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) _A = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
212
0
"""Wav2Vec2 model configuration.

Reconstructed from an obfuscated dump: every ``__init__`` parameter was named
``_snake_case`` (a SyntaxError — duplicate arguments) while the body used the
real names, and the class was based on the undefined ``a``. Canonical
transformers parameter names are restored; defaults match the dump's values.
"""
import functools
import operator

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'facebook/wav2vec2-base-960h': 'https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json',
    # See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}


class Wav2Vec2Config(PretrainedConfig):
    """Configuration for a Wav2Vec2 model.

    Groups hyper-parameters for the convolutional feature extractor, the
    transformer encoder, SpecAugment masking, quantized-codevector
    pretraining, the CTC head, the optional adapter, and the XVector head.
    """

    model_type = "wav2vec2"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1_500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        adapter_attn_dim=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        # One feature-extractor layer per conv_dim entry.
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
                ' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size
        self.adapter_attn_dim = adapter_attn_dim

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        # Total downsampling factor of the conv feature extractor.
        return functools.reduce(operator.mul, self.conv_stride, 1)


# Backward-compatible alias for the obfuscated class name.
__A = Wav2Vec2Config
6
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): """simple docstring""" if exponent == 1: return base if exponent % 2 == 0: lowercase__ = _modexpt(SCREAMING_SNAKE_CASE , exponent // 2 , SCREAMING_SNAKE_CASE ) % modulo_value return (x * x) % modulo_value else: return (base * _modexpt(SCREAMING_SNAKE_CASE , exponent - 1 , SCREAMING_SNAKE_CASE )) % modulo_value def _a ( SCREAMING_SNAKE_CASE = 17_77 , SCREAMING_SNAKE_CASE = 18_55 , SCREAMING_SNAKE_CASE = 8 ): """simple docstring""" lowercase__ = base for _ in range(1 , SCREAMING_SNAKE_CASE ): lowercase__ = _modexpt(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , 10**digits ) return result if __name__ == "__main__": print(f"""{solution() = }""")
110
0
"""simple docstring""" from random import randint, random def _SCREAMING_SNAKE_CASE ( lowercase : Optional[int] , lowercase : int , lowercase : Any , lowercase : Union[str, Any] = False , lowercase : int = False , lowercase : Optional[Any] = 5 , ): '''simple docstring''' lowerCamelCase_ = [[-1] * number_of_cells] # Create a highway without any car lowerCamelCase_ = 0 lowerCamelCase_ = max(a_ , 0 ) while i < number_of_cells: lowerCamelCase_ = ( randint(0 , a_ ) if random_speed else initial_speed ) # Place the cars i += ( randint(1 , max_speed * 2 ) if random_frequency else frequency ) # Arbitrary number, may need tuning return highway def _SCREAMING_SNAKE_CASE ( lowercase : Union[str, Any] , lowercase : Tuple ): '''simple docstring''' lowerCamelCase_ = 0 lowerCamelCase_ = highway_now[car_index + 1 :] for cell in range(len(a_ ) ): # May need a better name for this if cells[cell] != -1: # If the cell is not empty then return distance # we have the distance we wanted distance += 1 # Here if the car is near the end of the highway return distance + get_distance(a_ , -1 ) def _SCREAMING_SNAKE_CASE ( lowercase : Any , lowercase : Optional[Any] , lowercase : int ): '''simple docstring''' lowerCamelCase_ = len(a_ ) # Beforce calculations, the highway is empty lowerCamelCase_ = [-1] * number_of_cells for car_index in range(a_ ): if highway_now[car_index] != -1: # Add 1 to the current speed of the car and cap the speed lowerCamelCase_ = min(highway_now[car_index] + 1 , a_ ) # Number of empty cell before the next car lowerCamelCase_ = get_distance(a_ , a_ ) - 1 # We can't have the car causing an accident lowerCamelCase_ = min(next_highway[car_index] , a_ ) if random() < probability: # Randomly, a driver will slow down lowerCamelCase_ = max(next_highway[car_index] - 1 , 0 ) return next_highway def _SCREAMING_SNAKE_CASE ( lowercase : str , lowercase : Union[str, Any] , lowercase : Any , lowercase : List[str] ): '''simple docstring''' lowerCamelCase_ = len(highway[0] ) for i in 
range(a_ ): lowerCamelCase_ = update(highway[i] , a_ , a_ ) lowerCamelCase_ = [-1] * number_of_cells for car_index in range(a_ ): lowerCamelCase_ = next_speeds_calculated[car_index] if speed != -1: # Change the position based on the speed (with % to create the loop) lowerCamelCase_ = (car_index + speed) % number_of_cells # Commit the change of position lowerCamelCase_ = speed highway.append(a_ ) return highway if __name__ == "__main__": import doctest doctest.testmod()
364
def bin_to_octal(bin_string: str) -> str:
    """Convert a binary string to its octal representation.

    Fixes from the obfuscated original: the parameter was named ``lowercase``
    while the body read ``bin_string`` (NameError), and the zero-padded string
    was assigned to a throwaway variable, making the padding loop infinite.

    :param bin_string: string of '0'/'1' characters
    :return: octal digits as a string
    :raises ValueError: on non-binary characters or an empty string
    """
    if not all(char in '01' for char in bin_string):
        raise ValueError('Non-binary value was passed to the function')
    if not bin_string:
        raise ValueError('Empty string was passed to the function')

    oct_string = ''
    # Left-pad so the length is a multiple of 3 (one octal digit per 3 bits).
    while len(bin_string) % 3 != 0:
        bin_string = '0' + bin_string
    bin_string_in_3_list = [
        bin_string[index : index + 3] for index in range(len(bin_string)) if index % 3 == 0
    ]
    for bin_group in bin_string_in_3_list:
        oct_val = 0
        for index, val in enumerate(bin_group):
            # Weight each bit by its power of two within the 3-bit group.
            oct_val += int(2 ** (2 - index) * int(val))
        oct_string += str(oct_val)
    return oct_string


# Backward-compatible alias for the obfuscated name.
_SCREAMING_SNAKE_CASE = bin_to_octal


if __name__ == "__main__":
    from doctest import testmod

    testmod()
208
0
from __future__ import annotations


def a__(nums: list[int]) -> int:
    """Return the maximum sum of non-adjacent elements of *nums* (min 0).

    Fix: the obfuscated original's body referenced the undefined names
    ``nums`` and ``max_excluding`` (NameError on every non-empty call).

    Classic "house robber" DP: track the best sum including vs. excluding
    the current element. An empty list or an all-negative list yields 0
    (choosing no element at all).
    """
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        # Including num forces the previous element to be excluded.
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)


# Descriptive alias.
maximum_non_adjacent_sum = a__


if __name__ == "__main__":
    import doctest

    doctest.testmod()
336
"""Lazy import structure for the (deprecated) M-CTC-T model.

Fixes from the obfuscated dump: the import-structure dict and the
torch-only model list were both assigned to ``lowercase__`` (so the dict
was never extended and ``_LazyModule`` received an undefined name), and
the lazy module was bound to a throwaway variable instead of
``sys.modules[__name__]``.
"""
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    'configuration_mctct': ['MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MCTCTConfig'],
    'feature_extraction_mctct': ['MCTCTFeatureExtractor'],
    'processing_mctct': ['MCTCTProcessor'],
}

# Modeling classes require torch; register them only when it is available.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mctct"] = [
        'MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'MCTCTForCTC',
        'MCTCTModel',
        'MCTCTPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
    from .feature_extraction_mctct import MCTCTFeatureExtractor
    from .processing_mctct import MCTCTProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mctct import (
            MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            MCTCTForCTC,
            MCTCTModel,
            MCTCTPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports symbols on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
324
0
"""torch.hub entry points for transformers (reconstructed hubconf.py).

Fixes from the obfuscated dump: ``SRC_DIR`` was assigned to ``lowercase``
but used under its real name, all wrapper functions collided on one name,
and the bodies forwarded the undefined ``SCREAMING_SNAKE_CASE__`` instead
of their own arguments.
"""
import os
import sys


SRC_DIR = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)


from transformers import (
    AutoConfig,
    AutoModel,
    AutoModelForCausalLM,
    AutoModelForMaskedLM,
    AutoModelForQuestionAnswering,
    AutoModelForSequenceClassification,
    AutoTokenizer,
    add_start_docstrings,
)


# Packages torch.hub must install before loading these entry points.
dependencies = [
    "torch",
    "numpy",
    "tokenizers",
    "filelock",
    "requests",
    "tqdm",
    "regex",
    "sentencepiece",
    "sacremoses",
    "importlib_metadata",
    "huggingface_hub",
]


@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
    """torch.hub entry point for AutoConfig."""
    return AutoConfig.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    """torch.hub entry point for AutoTokenizer."""
    return AutoTokenizer.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    """torch.hub entry point for AutoModel."""
    return AutoModel.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM(*args, **kwargs):
    """torch.hub entry point for AutoModelForCausalLM."""
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
    """torch.hub entry point for AutoModelForMaskedLM."""
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    """torch.hub entry point for AutoModelForSequenceClassification."""
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    """torch.hub entry point for AutoModelForQuestionAnswering."""
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
366
"""Tests for transformers.activations.

Fixes from the obfuscated dump: every test method was named ``A_`` (so only
the last survived collection), local names collided on one placeholder, and
bodies passed the undefined ``_a`` to the functions under test.
"""
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch


if is_torch_available():
    import torch

    from transformers.activations import gelu_new, gelu_python, get_activation


@require_torch
class __lowercase(unittest.TestCase):
    def test_gelu_versions(self):
        # gelu_python matches torch's builtin gelu; gelu_new is a different curve.
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation('gelu')
        self.assertTrue(torch.allclose(gelu_python(x), torch_builtin(x)))
        self.assertFalse(torch.allclose(gelu_python(x), gelu_new(x)))

    def test_gelu_10(self):
        # gelu_10 is gelu clipped at 10; it must agree with gelu below the clip.
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation('gelu')
        gelu10 = get_activation('gelu_10')
        y_gelu = torch_builtin(x)
        y_gelu_10 = gelu10(x)
        clipped_mask = torch.where(y_gelu_10 < 10.0, 1, 0)
        self.assertTrue(torch.max(y_gelu_10).item() == 10.0)
        self.assertTrue(torch.allclose(y_gelu * clipped_mask, y_gelu_10 * clipped_mask))

    def test_get_activation(self):
        # Every registered activation resolves; unknown keys raise KeyError.
        get_activation('gelu')
        get_activation('gelu_10')
        get_activation('gelu_fast')
        get_activation('gelu_new')
        get_activation('gelu_python')
        get_activation('gelu_pytorch_tanh')
        get_activation('linear')
        get_activation('mish')
        get_activation('quick_gelu')
        get_activation('relu')
        get_activation('sigmoid')
        get_activation('silu')
        get_activation('swish')
        get_activation('tanh')
        with self.assertRaises(KeyError):
            get_activation('bogus')
        with self.assertRaises(KeyError):
            get_activation(None)

    def test_activations_are_distinct_objects(self):
        # Each lookup returns a fresh object: attributes set on one instance
        # must not leak onto another.
        act1 = get_activation('gelu')
        act1.a = 1
        act2 = get_activation('gelu')
        self.assertEqual(act1.a, 1)
        with self.assertRaises(AttributeError):
            _ = act2.a
35
0
"""simple docstring""" def lowercase__( __SCREAMING_SNAKE_CASE : list[list[int]] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : set ): lowercase_ , lowercase_ : int = len(UpperCAmelCase_ ), len(grid[0] ) if ( min(UpperCAmelCase_ , UpperCAmelCase_ ) < 0 or row == row_length or col == col_length or (row, col) in visit or grid[row][col] == 1 ): return 0 if row == row_length - 1 and col == col_length - 1: return 1 visit.add((row, col) ) lowercase_ : Optional[int] = 0 count += depth_first_search(UpperCAmelCase_ , row + 1 , UpperCAmelCase_ , UpperCAmelCase_ ) count += depth_first_search(UpperCAmelCase_ , row - 1 , UpperCAmelCase_ , UpperCAmelCase_ ) count += depth_first_search(UpperCAmelCase_ , UpperCAmelCase_ , col + 1 , UpperCAmelCase_ ) count += depth_first_search(UpperCAmelCase_ , UpperCAmelCase_ , col - 1 , UpperCAmelCase_ ) visit.remove((row, col) ) return count if __name__ == "__main__": import doctest doctest.testmod()
213
"""simple docstring""" class a : """simple docstring""" def __init__( self: Dict ): """simple docstring""" A__ = {} def UpperCamelCase ( self: List[str] ): """simple docstring""" print(self.vertex ) for i in self.vertex: print(UpperCamelCase , """ -> """ , """ -> """.join([str(UpperCamelCase ) for j in self.vertex[i]] ) ) def UpperCamelCase ( self: Any , UpperCamelCase: int , UpperCamelCase: int ): """simple docstring""" if from_vertex in self.vertex: self.vertex[from_vertex].append(UpperCamelCase ) else: # else make a new vertex A__ = [to_vertex] def UpperCamelCase ( self: Union[str, Any] ): """simple docstring""" A__ = [False] * len(self.vertex ) # call the recursive helper function for i in range(len(self.vertex ) ): if not visited[i]: self.dfs_recursive(UpperCamelCase , UpperCamelCase ) def UpperCamelCase ( self: str , UpperCamelCase: int , UpperCamelCase: list ): """simple docstring""" A__ = True print(UpperCamelCase , end=""" """ ) # Recur for all the vertices that are adjacent to this node for i in self.vertex: if not visited[i]: self.dfs_recursive(UpperCamelCase , UpperCamelCase ) if __name__ == "__main__": SCREAMING_SNAKE_CASE_ : Optional[int] = Graph() g.add_edge(0, 1) g.add_edge(0, 2) g.add_edge(1, 2) g.add_edge(2, 0) g.add_edge(2, 3) g.add_edge(3, 3) g.print_graph() print('DFS:') g.dfs() # OUTPUT: # 0 -> 1 -> 2 # 1 -> 2 # 2 -> 0 -> 3 # 3 -> 3 # DFS: # 0 1 2 3
335
0
"""simple docstring""" import random import unittest from torch.utils.data import BatchSampler, DataLoader, IterableDataset from accelerate import Accelerator from accelerate.data_loader import ( BatchSamplerShard, DataLoaderDispatcher, DataLoaderShard, IterableDatasetShard, SkipBatchSampler, SkipDataLoader, skip_first_batches, ) class snake_case ( __snake_case ): def __init__( self : Dict , UpperCamelCase__ : List[str]=0.01 , UpperCamelCase__ : Optional[int]=1_0_0_0)-> Dict: '''simple docstring''' __lowerCAmelCase: Dict = p_stop __lowerCAmelCase: Tuple = max_length def __iter__( self : Optional[int])-> List[Any]: '''simple docstring''' __lowerCAmelCase: Dict = 0 __lowerCAmelCase: int = False while not stop and count < self.max_length: yield count count += 1 __lowerCAmelCase: int = random.random() < self.p_stop class snake_case ( unittest.TestCase ): def lowercase_ ( self : str , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : str=False , UpperCamelCase__ : Tuple=True)-> Optional[Any]: '''simple docstring''' __lowerCAmelCase: Dict = [ BatchSamplerShard(UpperCamelCase__ , 2 , UpperCamelCase__ , split_batches=UpperCamelCase__ , even_batches=UpperCamelCase__) for i in range(2) ] __lowerCAmelCase: Tuple = [list(UpperCamelCase__) for batch_sampler_shard in batch_sampler_shards] if not split_batches: self.assertListEqual([len(UpperCamelCase__) for shard in batch_sampler_shards] , [len(UpperCamelCase__) for e in expected]) self.assertListEqual(UpperCamelCase__ , UpperCamelCase__) def lowercase_ ( self : Any)-> List[Any]: '''simple docstring''' __lowerCAmelCase: Optional[Any] = BatchSampler(range(2_4) , batch_size=3 , drop_last=UpperCamelCase__) __lowerCAmelCase: Tuple = [ [[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]], [[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [2_1, 2_2, 2_3]], ] self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__) __lowerCAmelCase: Optional[Any] = BatchSampler(range(2_4) , batch_size=3 , 
drop_last=UpperCamelCase__) # Expected shouldn't change self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__) # Check the shards when the dataset is a round multiple of batch size but not total batch size. __lowerCAmelCase: List[str] = BatchSampler(range(2_1) , batch_size=3 , drop_last=UpperCamelCase__) __lowerCAmelCase: Union[str, Any] = [ [[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]], [[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [0, 1, 2]], ] self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__) __lowerCAmelCase: Tuple = BatchSampler(range(2_1) , batch_size=3 , drop_last=UpperCamelCase__) __lowerCAmelCase: Optional[int] = [ [[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]], [[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]], ] self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__) # Check the shards when the dataset is not a round multiple of batch size but has a multiple of # num_processes batch. __lowerCAmelCase: int = BatchSampler(range(2_2) , batch_size=3 , drop_last=UpperCamelCase__) __lowerCAmelCase: str = [ [[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]], [[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [2_1, 0, 1]], ] self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__) __lowerCAmelCase: int = BatchSampler(range(2_2) , batch_size=3 , drop_last=UpperCamelCase__) __lowerCAmelCase: Dict = [ [[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]], [[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]], ] self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__) # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of # num_processes batch. 
__lowerCAmelCase: List[Any] = BatchSampler(range(2_0) , batch_size=3 , drop_last=UpperCamelCase__) __lowerCAmelCase: List[str] = [ [[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 0]], [[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [1, 2, 3]], ] self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__) __lowerCAmelCase: Optional[int] = BatchSampler(range(2_0) , batch_size=3 , drop_last=UpperCamelCase__) __lowerCAmelCase: str = [ [[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]], [[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]], ] self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__) # Check the shards when the dataset is very small. __lowerCAmelCase: Tuple = BatchSampler(range(2) , batch_size=3 , drop_last=UpperCamelCase__) __lowerCAmelCase: Union[str, Any] = [[[0, 1, 0]], [[1, 0, 1]]] self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__) __lowerCAmelCase: str = BatchSampler(range(2) , batch_size=3 , drop_last=UpperCamelCase__) __lowerCAmelCase: str = [[], []] self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__) def lowercase_ ( self : List[Any])-> Dict: '''simple docstring''' __lowerCAmelCase: Dict = BatchSampler(range(2_4) , batch_size=4 , drop_last=UpperCamelCase__) __lowerCAmelCase: Dict = [ [[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 2_1]], [[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9], [2_2, 2_3]], ] self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ , split_batches=UpperCamelCase__) __lowerCAmelCase: List[str] = BatchSampler(range(2_4) , batch_size=4 , drop_last=UpperCamelCase__) # Expected shouldn't change self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ , split_batches=UpperCamelCase__) # Check the shards when the dataset is not a round multiple of batch size. 
__lowerCAmelCase: Tuple = BatchSampler(range(2_2) , batch_size=4 , drop_last=UpperCamelCase__) __lowerCAmelCase: Optional[Any] = [ [[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 2_1]], [[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9], [0, 1]], ] self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ , split_batches=UpperCamelCase__) __lowerCAmelCase: List[Any] = BatchSampler(range(2_2) , batch_size=4 , drop_last=UpperCamelCase__) __lowerCAmelCase: Tuple = [ [[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7]], [[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]], ] self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ , split_batches=UpperCamelCase__) # Check the shards when the dataset is not a round multiple of batch size or num_processes. __lowerCAmelCase: Union[str, Any] = BatchSampler(range(2_1) , batch_size=4 , drop_last=UpperCamelCase__) __lowerCAmelCase: Optional[Any] = [ [[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 0]], [[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9], [1, 2]], ] self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ , split_batches=UpperCamelCase__) __lowerCAmelCase: Any = BatchSampler(range(2_1) , batch_size=4 , drop_last=UpperCamelCase__) __lowerCAmelCase: Dict = [ [[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7]], [[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]], ] self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ , split_batches=UpperCamelCase__) # Check the shards when the dataset is very small. 
__lowerCAmelCase: Tuple = BatchSampler(range(2) , batch_size=4 , drop_last=UpperCamelCase__) __lowerCAmelCase: List[str] = [[[0, 1]], [[0, 1]]] self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ , split_batches=UpperCamelCase__) __lowerCAmelCase: str = BatchSampler(range(2) , batch_size=4 , drop_last=UpperCamelCase__) __lowerCAmelCase: Tuple = [[], []] self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ , split_batches=UpperCamelCase__) def lowercase_ ( self : int)-> Dict: '''simple docstring''' __lowerCAmelCase: List[Any] = BatchSampler(range(2_4) , batch_size=3 , drop_last=UpperCamelCase__) __lowerCAmelCase: Dict = [ [[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]], [[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [2_1, 2_2, 2_3]], ] self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ , even_batches=UpperCamelCase__) __lowerCAmelCase: Union[str, Any] = BatchSampler(range(2_4) , batch_size=3 , drop_last=UpperCamelCase__) # Expected shouldn't change self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ , even_batches=UpperCamelCase__) # Check the shards when the dataset is a round multiple of batch size but not total batch size. __lowerCAmelCase: List[Any] = BatchSampler(range(2_1) , batch_size=3 , drop_last=UpperCamelCase__) __lowerCAmelCase: Any = [ [[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]], [[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]], ] self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ , even_batches=UpperCamelCase__) __lowerCAmelCase: int = BatchSampler(range(2_1) , batch_size=3 , drop_last=UpperCamelCase__) __lowerCAmelCase: Tuple = [ [[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]], [[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]], ] self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ , even_batches=UpperCamelCase__) # Check the shards when the dataset is not a round multiple of batch size but has a multiple of # num_processes batch. 
__lowerCAmelCase: Tuple = BatchSampler(range(2_2) , batch_size=3 , drop_last=UpperCamelCase__) __lowerCAmelCase: Dict = [ [[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]], [[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [2_1]], ] self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ , even_batches=UpperCamelCase__) __lowerCAmelCase: Dict = BatchSampler(range(2_2) , batch_size=3 , drop_last=UpperCamelCase__) __lowerCAmelCase: Dict = [ [[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]], [[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]], ] self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ , even_batches=UpperCamelCase__) # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of # num_processes batch. __lowerCAmelCase: Optional[Any] = BatchSampler(range(2_0) , batch_size=3 , drop_last=UpperCamelCase__) __lowerCAmelCase: str = [ [[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9]], [[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]], ] self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ , even_batches=UpperCamelCase__) __lowerCAmelCase: List[Any] = BatchSampler(range(2_0) , batch_size=3 , drop_last=UpperCamelCase__) __lowerCAmelCase: Optional[Any] = [ [[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]], [[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]], ] self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ , even_batches=UpperCamelCase__) # Check the shards when the dataset is very small. 
__lowerCAmelCase: int = BatchSampler(range(2) , batch_size=3 , drop_last=UpperCamelCase__) __lowerCAmelCase: Tuple = [[[0, 1]], []] self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ , even_batches=UpperCamelCase__) __lowerCAmelCase: Dict = BatchSampler(range(2) , batch_size=3 , drop_last=UpperCamelCase__) __lowerCAmelCase: Tuple = [[], []] self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ , even_batches=UpperCamelCase__) def lowercase_ ( self : int)-> Tuple: '''simple docstring''' __lowerCAmelCase: List[Any] = BatchSampler(range(2_4) , batch_size=4 , drop_last=UpperCamelCase__) __lowerCAmelCase: Optional[int] = [ [[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 2_1]], [[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9], [2_2, 2_3]], ] self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ , split_batches=UpperCamelCase__ , even_batches=UpperCamelCase__) __lowerCAmelCase: List[Any] = BatchSampler(range(2_4) , batch_size=4 , drop_last=UpperCamelCase__) # Expected shouldn't change self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ , split_batches=UpperCamelCase__ , even_batches=UpperCamelCase__) # Check the shards when the dataset is not a round multiple of batch size. 
__lowerCAmelCase: str = BatchSampler(range(2_2) , batch_size=4 , drop_last=UpperCamelCase__) __lowerCAmelCase: Optional[int] = [ [[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 2_1]], [[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]], ] self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ , split_batches=UpperCamelCase__ , even_batches=UpperCamelCase__) __lowerCAmelCase: int = BatchSampler(range(2_2) , batch_size=4 , drop_last=UpperCamelCase__) __lowerCAmelCase: str = [ [[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7]], [[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]], ] self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ , split_batches=UpperCamelCase__ , even_batches=UpperCamelCase__) # Check the shards when the dataset is not a round multiple of batch size or num_processes. __lowerCAmelCase: Dict = BatchSampler(range(2_1) , batch_size=4 , drop_last=UpperCamelCase__) __lowerCAmelCase: List[str] = [ [[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0]], [[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]], ] self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ , split_batches=UpperCamelCase__ , even_batches=UpperCamelCase__) __lowerCAmelCase: Tuple = BatchSampler(range(2_1) , batch_size=4 , drop_last=UpperCamelCase__) __lowerCAmelCase: Optional[Any] = [ [[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7]], [[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]], ] self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ , split_batches=UpperCamelCase__ , even_batches=UpperCamelCase__) # Check the shards when the dataset is very small. 
__lowerCAmelCase: Tuple = BatchSampler(range(2) , batch_size=4 , drop_last=UpperCamelCase__) __lowerCAmelCase: Dict = [[[0, 1]], []] self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ , split_batches=UpperCamelCase__ , even_batches=UpperCamelCase__) __lowerCAmelCase: str = BatchSampler(range(2) , batch_size=4 , drop_last=UpperCamelCase__) __lowerCAmelCase: Union[str, Any] = [[], []] self.check_batch_sampler_shards(UpperCamelCase__ , UpperCamelCase__ , split_batches=UpperCamelCase__ , even_batches=UpperCamelCase__) def lowercase_ ( self : Union[str, Any])-> Union[str, Any]: '''simple docstring''' __lowerCAmelCase: int = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 1_0, 1_1], [1_2, 1_3]] __lowerCAmelCase: Tuple = [BatchSamplerShard(UpperCamelCase__ , 2 , UpperCamelCase__ , even_batches=UpperCamelCase__) for i in range(2)] self.assertEqual(len(batch_sampler_shards[0]) , 3) self.assertEqual(len(batch_sampler_shards[1]) , 2) self.assertListEqual(list(batch_sampler_shards[0]) , [[0, 1, 2], [5, 6, 7, 8], [1_2, 1_3]]) self.assertListEqual(list(batch_sampler_shards[1]) , [[3, 4], [9, 1_0, 1_1]]) def lowercase_ ( self : List[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[Any]=False , UpperCamelCase__ : str=2 , UpperCamelCase__ : Optional[int]=False)-> Union[str, Any]: '''simple docstring''' random.seed(UpperCamelCase__) __lowerCAmelCase: Union[str, Any] = list(UpperCamelCase__) __lowerCAmelCase: List[str] = [ IterableDatasetShard( UpperCamelCase__ , batch_size=UpperCamelCase__ , drop_last=UpperCamelCase__ , num_processes=UpperCamelCase__ , process_index=UpperCamelCase__ , split_batches=UpperCamelCase__ , ) for i in range(UpperCamelCase__) ] __lowerCAmelCase: Tuple = [] for iterable_dataset_shard in iterable_dataset_shards: # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results. 
random.seed(UpperCamelCase__) iterable_dataset_lists.append(list(UpperCamelCase__)) __lowerCAmelCase: Optional[int] = batch_size // num_processes if split_batches else batch_size # All iterable dataset shard should have the same length, a round multiple of shard_batch_size __lowerCAmelCase: int = iterable_dataset_lists[0] for l in iterable_dataset_lists[1:]: self.assertEqual(len(UpperCamelCase__) , len(UpperCamelCase__)) self.assertTrue(len(UpperCamelCase__) % shard_batch_size == 0) __lowerCAmelCase: Optional[int] = [] for idx in range(0 , len(UpperCamelCase__) , UpperCamelCase__): for l in iterable_dataset_lists: observed += l[idx : idx + shard_batch_size] if not drop_last: while len(UpperCamelCase__) < len(UpperCamelCase__): reference += reference self.assertListEqual(UpperCamelCase__ , reference[: len(UpperCamelCase__)]) def lowercase_ ( self : int)-> Any: '''simple docstring''' __lowerCAmelCase: List[Any] = 4_2 __lowerCAmelCase: Optional[Any] = RandomIterableDataset() self.check_iterable_dataset_shards(UpperCamelCase__ , UpperCamelCase__ , batch_size=4 , drop_last=UpperCamelCase__ , split_batches=UpperCamelCase__) self.check_iterable_dataset_shards(UpperCamelCase__ , UpperCamelCase__ , batch_size=4 , drop_last=UpperCamelCase__ , split_batches=UpperCamelCase__) self.check_iterable_dataset_shards(UpperCamelCase__ , UpperCamelCase__ , batch_size=4 , drop_last=UpperCamelCase__ , split_batches=UpperCamelCase__) self.check_iterable_dataset_shards(UpperCamelCase__ , UpperCamelCase__ , batch_size=4 , drop_last=UpperCamelCase__ , split_batches=UpperCamelCase__) # Edge case with a very small dataset __lowerCAmelCase: int = RandomIterableDataset(max_length=2) self.check_iterable_dataset_shards(UpperCamelCase__ , UpperCamelCase__ , batch_size=4 , drop_last=UpperCamelCase__ , split_batches=UpperCamelCase__) self.check_iterable_dataset_shards(UpperCamelCase__ , UpperCamelCase__ , batch_size=4 , drop_last=UpperCamelCase__ , split_batches=UpperCamelCase__) 
self.check_iterable_dataset_shards(UpperCamelCase__ , UpperCamelCase__ , batch_size=4 , drop_last=UpperCamelCase__ , split_batches=UpperCamelCase__) self.check_iterable_dataset_shards(UpperCamelCase__ , UpperCamelCase__ , batch_size=4 , drop_last=UpperCamelCase__ , split_batches=UpperCamelCase__) def lowercase_ ( self : Dict)-> Any: '''simple docstring''' __lowerCAmelCase: Any = BatchSampler(range(1_6) , batch_size=4 , drop_last=UpperCamelCase__) __lowerCAmelCase: int = SkipBatchSampler(UpperCamelCase__ , 2) self.assertListEqual(list(UpperCamelCase__) , [[8, 9, 1_0, 1_1], [1_2, 1_3, 1_4, 1_5]]) def lowercase_ ( self : Optional[Any])-> Dict: '''simple docstring''' __lowerCAmelCase: List[Any] = SkipDataLoader(list(range(1_6)) , batch_size=4 , skip_batches=2) self.assertListEqual([t.tolist() for t in dataloader] , [[8, 9, 1_0, 1_1], [1_2, 1_3, 1_4, 1_5]]) def lowercase_ ( self : Optional[Any])-> Optional[int]: '''simple docstring''' __lowerCAmelCase: List[str] = DataLoader(list(range(1_6)) , batch_size=4) __lowerCAmelCase: str = skip_first_batches(UpperCamelCase__ , num_batches=2) self.assertListEqual([t.tolist() for t in new_dataloader] , [[8, 9, 1_0, 1_1], [1_2, 1_3, 1_4, 1_5]]) def lowercase_ ( self : str)-> str: '''simple docstring''' __lowerCAmelCase: Any = DataLoaderShard(list(range(1_6)) , batch_size=4) for idx, _ in enumerate(UpperCamelCase__): self.assertEqual(dataloader.end_of_dataloader , idx == 3) # Test it also works on the second iteration for idx, _ in enumerate(UpperCamelCase__): self.assertEqual(dataloader.end_of_dataloader , idx == 3) def lowercase_ ( self : str)-> int: '''simple docstring''' Accelerator() __lowerCAmelCase: List[str] = DataLoaderDispatcher(range(1_6) , batch_size=4) for idx, _ in enumerate(UpperCamelCase__): self.assertEqual(dataloader.end_of_dataloader , idx == 3) # Test it also works on the second iteration for idx, _ in enumerate(UpperCamelCase__): self.assertEqual(dataloader.end_of_dataloader , idx == 3)
108
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_torch_available, ) __A = { "configuration_speecht5": [ "SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP", "SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP", "SpeechT5Config", "SpeechT5HifiGanConfig", ], "feature_extraction_speecht5": ["SpeechT5FeatureExtractor"], "processing_speecht5": ["SpeechT5Processor"], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = ["SpeechT5Tokenizer"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ "SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST", "SpeechT5ForSpeechToText", "SpeechT5ForSpeechToSpeech", "SpeechT5ForTextToSpeech", "SpeechT5Model", "SpeechT5PreTrainedModel", "SpeechT5HifiGan", ] if TYPE_CHECKING: from .configuration_speechta import ( SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP, SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP, SpeechTaConfig, SpeechTaHifiGanConfig, ) from .feature_extraction_speechta import SpeechTaFeatureExtractor from .processing_speechta import SpeechTaProcessor try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_speechta import SpeechTaTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_speechta import ( SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST, SpeechTaForSpeechToSpeech, SpeechTaForSpeechToText, SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaModel, SpeechTaPreTrainedModel, ) else: import sys __A = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
108
1
from collections import namedtuple __snake_case :Tuple = namedtuple('''from_to''', '''from_ to''') __snake_case :Dict = { '''cubicmeter''': from_to(1, 1), '''litre''': from_to(0.0_0_1, 1000), '''kilolitre''': from_to(1, 1), '''gallon''': from_to(0.0_0_4_5_4, 2_6_4.1_7_2), '''cubicyard''': from_to(0.7_6_4_5_5, 1.3_0_7_9_5), '''cubicfoot''': from_to(0.0_2_8, 3_5.3_1_4_7), '''cup''': from_to(0.0_0_0_2_3_6_5_8_8, 4_2_2_6.7_5), } def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): if from_type not in METRIC_CONVERSION: raise ValueError( f'Invalid \'from_type\' value: {from_type!r} Supported values are:\n' + ''', '''.join(_UpperCAmelCase ) ) if to_type not in METRIC_CONVERSION: raise ValueError( f'Invalid \'to_type\' value: {to_type!r}. Supported values are:\n' + ''', '''.join(_UpperCAmelCase ) ) return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to if __name__ == "__main__": import doctest doctest.testmod()
49
import copy from dataclasses import dataclass from pathlib import Path from typing import Dict, Optional, Union @dataclass class _A : UpperCamelCase__ : Optional[Union[str, Path]] = None UpperCamelCase__ : bool = False UpperCamelCase__ : bool = False UpperCamelCase__ : bool = False UpperCamelCase__ : Optional[Dict] = None UpperCamelCase__ : Optional[str] = None UpperCamelCase__ : bool = False UpperCamelCase__ : bool = False UpperCamelCase__ : bool = False UpperCamelCase__ : bool = True UpperCamelCase__ : Optional[int] = None UpperCamelCase__ : int = 1 UpperCamelCase__ : Optional[Union[str, bool]] = None UpperCamelCase__ : bool = False UpperCamelCase__ : Optional[Dict] = None UpperCamelCase__ : Optional[str] = None def _lowerCamelCase ( self : Union[str, Any]): '''simple docstring''' return self.__class__(**{k: copy.deepcopy(__SCREAMING_SNAKE_CASE) for k, v in self.__dict__.items()})
49
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available UpperCAmelCase__ = {'configuration_wavlm': ['WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WavLMConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ = [ 'WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST', 'WavLMForAudioFrameClassification', 'WavLMForCTC', 'WavLMForSequenceClassification', 'WavLMForXVector', 'WavLMModel', 'WavLMPreTrainedModel', ] if TYPE_CHECKING: from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_wavlm import ( WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST, WavLMForAudioFrameClassification, WavLMForCTC, WavLMForSequenceClassification, WavLMForXVector, WavLMModel, WavLMPreTrainedModel, ) else: import sys UpperCAmelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
40
"""simple docstring""" from __future__ import annotations import random import unittest from transformers import TransfoXLConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, TFTransfoXLForSequenceClassification, TFTransfoXLLMHeadModel, TFTransfoXLModel, ) class lowerCAmelCase__ : def __init__( self : Any , _lowerCamelCase : Optional[Any] , ): _snake_case = parent _snake_case = 13 _snake_case = 7 _snake_case = 30 _snake_case = self.seq_length + self.mem_len _snake_case = 15 _snake_case = True _snake_case = True _snake_case = 99 _snake_case = [10, 50, 80] _snake_case = 32 _snake_case = 32 _snake_case = 4 _snake_case = 8 _snake_case = 128 _snake_case = 2 _snake_case = 2 _snake_case = None _snake_case = 1 _snake_case = 0 _snake_case = 3 _snake_case = self.vocab_size - 1 _snake_case = 0.0_1 def lowercase ( self : Optional[int] ): _snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _snake_case = None if self.use_labels: _snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _snake_case = TransfoXLConfig( vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , ) return (config, input_ids_a, input_ids_a, lm_labels) def lowercase ( self : Any ): random.seed(self.seed ) 
tf.random.set_seed(self.seed ) def lowercase ( self : Dict , _lowerCamelCase : int , _lowerCamelCase : List[Any] , _lowerCamelCase : Dict , _lowerCamelCase : List[str] ): _snake_case = TFTransfoXLModel(_lowerCamelCase ) _snake_case , _snake_case = model(_lowerCamelCase ).to_tuple() _snake_case = {'''input_ids''': input_ids_a, '''mems''': mems_a} _snake_case , _snake_case = model(_lowerCamelCase ).to_tuple() self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) def lowercase ( self : List[Any] , _lowerCamelCase : Dict , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Optional[int] , _lowerCamelCase : Tuple ): _snake_case = TFTransfoXLLMHeadModel(_lowerCamelCase ) _snake_case , _snake_case = model(_lowerCamelCase ).to_tuple() _snake_case = {'''input_ids''': input_ids_a, '''labels''': lm_labels} _snake_case , _snake_case = model(_lowerCamelCase ).to_tuple() _snake_case , _snake_case = model([input_ids_a, mems_a] ).to_tuple() _snake_case = {'''input_ids''': input_ids_a, '''mems''': mems_a, '''labels''': lm_labels} _snake_case , _snake_case = model(_lowerCamelCase ).to_tuple() self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * 
self.num_hidden_layers , ) def lowercase ( self : Any , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Optional[int] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : List[str] ): _snake_case = TFTransfoXLForSequenceClassification(_lowerCamelCase ) _snake_case = model(_lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowercase ( self : str ): _snake_case = self.prepare_config_and_inputs() ((_snake_case) , (_snake_case) , (_snake_case) , (_snake_case)) = config_and_inputs _snake_case = {'''input_ids''': input_ids_a} return config, inputs_dict @require_tf class lowerCAmelCase__ ( A_ , A_ , unittest.TestCase ): __a = ( (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else () ) __a = () if is_tf_available() else () __a = ( { """feature-extraction""": TFTransfoXLModel, """text-classification""": TFTransfoXLForSequenceClassification, """text-generation""": TFTransfoXLLMHeadModel, """zero-shot""": TFTransfoXLForSequenceClassification, } if is_tf_available() else {} ) # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented __a = False __a = False __a = False __a = False def lowercase ( self : List[Any] , _lowerCamelCase : List[Any] , _lowerCamelCase : Tuple , _lowerCamelCase : List[Any] , _lowerCamelCase : Optional[int] , _lowerCamelCase : Union[str, Any] ): if pipeline_test_casse_name == "TextGenerationPipelineTests": # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`. # `TransfoXLConfig` was never used in pipeline tests: cannot create a simple # tokenizer. 
return True return False def lowercase ( self : List[Any] ): _snake_case = TFTransfoXLModelTester(self ) _snake_case = ConfigTester(self , config_class=_lowerCamelCase , d_embed=37 ) def lowercase ( self : List[str] ): self.config_tester.run_common_tests() def lowercase ( self : Union[str, Any] ): self.model_tester.set_seed() _snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_model(*_lowerCamelCase ) def lowercase ( self : str ): self.model_tester.set_seed() _snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_lm_head(*_lowerCamelCase ) def lowercase ( self : str ): _snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*_lowerCamelCase ) def lowercase ( self : str ): _snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common() _snake_case = [TFTransfoXLForSequenceClassification] for model_class in self.all_model_classes: _snake_case = model_class(_lowerCamelCase ) assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer ) if model_class in list_other_models_with_output_ebd: _snake_case = model.get_output_embeddings() assert isinstance(_lowerCamelCase , tf.keras.layers.Layer ) _snake_case = model.get_bias() assert name is None else: _snake_case = model.get_output_embeddings() assert x is None _snake_case = model.get_bias() assert name is None def lowercase ( self : Optional[Any] ): # TODO JP: Make TransfoXL XLA compliant pass @slow def lowercase ( self : int ): for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _snake_case = TFTransfoXLModel.from_pretrained(_lowerCamelCase ) self.assertIsNotNone(_lowerCamelCase ) @unittest.skip(reason='''This model doesn\'t play well with fit() due to not returning a single loss.''' ) def lowercase ( self : int ): pass @require_tf class lowerCAmelCase__ ( unittest.TestCase ): 
@unittest.skip('''Skip test until #12651 is resolved.''' ) @slow def lowercase ( self : List[Any] ): _snake_case = TFTransfoXLLMHeadModel.from_pretrained('''transfo-xl-wt103''' ) # fmt: off _snake_case = tf.convert_to_tensor([[33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0]] , dtype=tf.intaa ) # noqa: E231 # fmt: on # In 1991 , the remains of Russian Tsar Nicholas II and his family # ( except for Alexei and Maria ) are discovered . # The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the # remainder of the story . 1883 Western Siberia , # a young Grigori Rasputin is asked by his father and a group of men to perform magic . # Rasputin has a vision and denounces one of the men as a horse thief . Although his # father initially slaps him for making such an accusation , Rasputin watches as the # man is chased outside and beaten . Twenty years later , Rasputin sees a vision of # the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous , # with people , even a bishop , begging for his blessing . 
<eod> </s> <eos> # fmt: off _snake_case = [33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0,33,1,1857,2,1,1009,4,1109,11739,4762,358,5,25,245,28,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,0] # noqa: E231 # fmt: on # In 1991, the remains of Russian Tsar Nicholas II and his family ( # except for Alexei and Maria ) are discovered. The voice of young son, # Tsarevich Alexei Nikolaevich, narrates the remainder of the story. # 1883 Western Siberia, a young Grigori Rasputin is asked by his father # and a group of men to perform magic. Rasputin has a vision and # denounces one of the men as a horse thief. Although his father initially # slaps him for making such an accusation, Rasputin watches as the man # is chased outside and beaten. Twenty years later, Rasputin sees a vision # of the Virgin Mary, prompting him to become a priest. # Rasputin quickly becomes famous, with people, even a bishop, begging for # his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar # Nicholas II and his family were discovered. The voice of <unk> young son, # Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos> _snake_case = model.generate(_lowerCamelCase , max_length=200 , do_sample=_lowerCamelCase ) self.assertListEqual(output_ids[0].numpy().tolist() , _lowerCamelCase )
40
1
def lowerCamelCase__ ( _A ): '''simple docstring''' snake_case_ = len(_A ) snake_case_ = len(matrix[0] ) snake_case_ = min(_A , _A ) for row in range(_A ): # Check if diagonal element is not zero if matrix[row][row] != 0: # Eliminate all the elements below the diagonal for col in range(row + 1 , _A ): snake_case_ = matrix[col][row] / matrix[row][row] for i in range(_A , _A ): matrix[col][i] -= multiplier * matrix[row][i] else: # Find a non-zero diagonal element to swap rows snake_case_ = True for i in range(row + 1 , _A ): if matrix[i][row] != 0: snake_case_ , snake_case_ = matrix[i], matrix[row] snake_case_ = False break if reduce: rank -= 1 for i in range(_A ): snake_case_ = matrix[i][rank] # Reduce the row pointer by one to stay on the same row row -= 1 return rank if __name__ == "__main__": import doctest doctest.testmod()
187
from timeit import timeit lowercase__ : Union[str, Any] = { "MALAYALAM": True, "String": False, "rotor": True, "level": True, "A": True, "BB": True, "ABC": False, "amanaplanacanalpanama": True, # "a man a plan a canal panama" } # Ensure our test data is valid assert all((key == key[::-1]) is value for key, value in test_data.items()) def lowerCamelCase__ ( _A ): '''simple docstring''' snake_case_ = 0 snake_case_ = len(_A ) - 1 while start_i < end_i: if s[start_i] == s[end_i]: start_i += 1 end_i -= 1 else: return False return True def lowerCamelCase__ ( _A ): '''simple docstring''' snake_case_ = len(_A ) // 2 snake_case_ = len(_A ) # We need to traverse till half of the length of string # as we can get access of the i'th last element from # i'th index. # eg: [0,1,2,3,4,5] => 4th index can be accessed # with the help of 1st index (i==n-i-1) # where n is length of string return all(s[i] == s[n - i - 1] for i in range(_A ) ) def lowerCamelCase__ ( _A ): '''simple docstring''' if len(_A ) <= 2: return True if s[0] == s[len(_A ) - 1]: return is_palindrome_recursive(s[1:-1] ) else: return False def lowerCamelCase__ ( _A ): '''simple docstring''' return s == s[::-1] def lowerCamelCase__ ( _A ): '''simple docstring''' snake_case_ = f"all({name}(key) is value for key, value in test_data.items())" snake_case_ = f"from __main__ import test_data, {name}" snake_case_ = 500000 snake_case_ = timeit(stmt=_A , setup=_A , number=_A ) print(f"{name:<35} finished {number:,} runs in {result:.5f} seconds" ) if __name__ == "__main__": for key, value in test_data.items(): assert is_palindrome(key) is is_palindrome_recursive(key) assert is_palindrome(key) is is_palindrome_slice(key) print(f'''{key:21} {value}''') print("a man a plan a canal panama") # finished 500,000 runs in 0.46793 seconds benchmark_function("is_palindrome_slice") # finished 500,000 runs in 0.85234 seconds benchmark_function("is_palindrome") # finished 500,000 runs in 1.32028 seconds 
benchmark_function("is_palindrome_recursive") # finished 500,000 runs in 2.08679 seconds benchmark_function("is_palindrome_traversal")
187
1
import numpy as np def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: np.array ) -> np.array: return 1 / (1 + np.exp(-vector )) def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: np.array ) -> np.array: return vector * sigmoid(1.702 * vector ) if __name__ == "__main__": import doctest doctest.testmod()
366
from typing import Optional import torch import torch.utils.checkpoint from torch import Tensor, nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...modeling_outputs import ( BackboneOutput, BaseModelOutputWithNoAttention, BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention, ) from ...modeling_utils import PreTrainedModel from ...utils import ( add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from ...utils.backbone_utils import BackboneMixin from .configuration_resnet import ResNetConfig SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__) # General docstring SCREAMING_SNAKE_CASE_ = 'ResNetConfig' # Base docstring SCREAMING_SNAKE_CASE_ = 'microsoft/resnet-50' SCREAMING_SNAKE_CASE_ = [1, 2048, 7, 7] # Image classification docstring SCREAMING_SNAKE_CASE_ = 'microsoft/resnet-50' SCREAMING_SNAKE_CASE_ = 'tiger cat' SCREAMING_SNAKE_CASE_ = [ 'microsoft/resnet-50', # See all resnet models at https://huggingface.co/models?filter=resnet ] class a ( nn.Module ): def __init__( self , A_ , A_ , A_ = 3 , A_ = 1 , A_ = "relu" ): '''simple docstring''' super().__init__() _UpperCAmelCase : Union[str, Any] = nn.Convad( A_ , A_ , kernel_size=A_ , stride=A_ , padding=kernel_size // 2 , bias=A_ ) _UpperCAmelCase : List[Any] = nn.BatchNormad(A_ ) _UpperCAmelCase : Union[str, Any] = ACTaFN[activation] if activation is not None else nn.Identity() def _UpperCAmelCase ( self , A_ ): '''simple docstring''' _UpperCAmelCase : List[Any] = self.convolution(A_ ) _UpperCAmelCase : Optional[int] = self.normalization(A_ ) _UpperCAmelCase : Optional[Any] = self.activation(A_ ) return hidden_state class a ( nn.Module ): def __init__( self , A_ ): '''simple docstring''' super().__init__() _UpperCAmelCase : Any = ResNetConvLayer( config.num_channels , config.embedding_size , kernel_size=7 , stride=2 , activation=config.hidden_act ) 
_UpperCAmelCase : List[str] = nn.MaxPoolad(kernel_size=3 , stride=2 , padding=1 ) _UpperCAmelCase : List[Any] = config.num_channels def _UpperCAmelCase ( self , A_ ): '''simple docstring''' _UpperCAmelCase : int = pixel_values.shape[1] if num_channels != self.num_channels: raise ValueError( "Make sure that the channel dimension of the pixel values match with the one set in the configuration." ) _UpperCAmelCase : int = self.embedder(A_ ) _UpperCAmelCase : int = self.pooler(A_ ) return embedding class a ( nn.Module ): def __init__( self , A_ , A_ , A_ = 2 ): '''simple docstring''' super().__init__() _UpperCAmelCase : Union[str, Any] = nn.Convad(A_ , A_ , kernel_size=1 , stride=A_ , bias=A_ ) _UpperCAmelCase : Optional[int] = nn.BatchNormad(A_ ) def _UpperCAmelCase ( self , A_ ): '''simple docstring''' _UpperCAmelCase : str = self.convolution(A_ ) _UpperCAmelCase : List[str] = self.normalization(A_ ) return hidden_state class a ( nn.Module ): def __init__( self , A_ , A_ , A_ = 1 , A_ = "relu" ): '''simple docstring''' super().__init__() _UpperCAmelCase : Optional[int] = in_channels != out_channels or stride != 1 _UpperCAmelCase : Dict = ( ResNetShortCut(A_ , A_ , stride=A_ ) if should_apply_shortcut else nn.Identity() ) _UpperCAmelCase : int = nn.Sequential( ResNetConvLayer(A_ , A_ , stride=A_ ) , ResNetConvLayer(A_ , A_ , activation=A_ ) , ) _UpperCAmelCase : Dict = ACTaFN[activation] def _UpperCAmelCase ( self , A_ ): '''simple docstring''' _UpperCAmelCase : Optional[Any] = hidden_state _UpperCAmelCase : Any = self.layer(A_ ) _UpperCAmelCase : Optional[int] = self.shortcut(A_ ) hidden_state += residual _UpperCAmelCase : Optional[int] = self.activation(A_ ) return hidden_state class a ( nn.Module ): def __init__( self , A_ , A_ , A_ = 1 , A_ = "relu" , A_ = 4 ): '''simple docstring''' super().__init__() _UpperCAmelCase : Optional[Any] = in_channels != out_channels or stride != 1 _UpperCAmelCase : Optional[int] = out_channels // reduction _UpperCAmelCase : List[str] 
= ( ResNetShortCut(A_ , A_ , stride=A_ ) if should_apply_shortcut else nn.Identity() ) _UpperCAmelCase : Dict = nn.Sequential( ResNetConvLayer(A_ , A_ , kernel_size=1 ) , ResNetConvLayer(A_ , A_ , stride=A_ ) , ResNetConvLayer(A_ , A_ , kernel_size=1 , activation=A_ ) , ) _UpperCAmelCase : List[str] = ACTaFN[activation] def _UpperCAmelCase ( self , A_ ): '''simple docstring''' _UpperCAmelCase : List[Any] = hidden_state _UpperCAmelCase : List[str] = self.layer(A_ ) _UpperCAmelCase : List[str] = self.shortcut(A_ ) hidden_state += residual _UpperCAmelCase : Dict = self.activation(A_ ) return hidden_state class a ( nn.Module ): def __init__( self , A_ , A_ , A_ , A_ = 2 , A_ = 2 , ): '''simple docstring''' super().__init__() _UpperCAmelCase : Any = ResNetBottleNeckLayer if config.layer_type == "bottleneck" else ResNetBasicLayer _UpperCAmelCase : Optional[Any] = nn.Sequential( # downsampling is done in the first layer with stride of 2 layer(A_ , A_ , stride=A_ , activation=config.hidden_act ) , *[layer(A_ , A_ , activation=config.hidden_act ) for _ in range(depth - 1 )] , ) def _UpperCAmelCase ( self , A_ ): '''simple docstring''' _UpperCAmelCase : List[Any] = input for layer in self.layers: _UpperCAmelCase : Optional[Any] = layer(A_ ) return hidden_state class a ( nn.Module ): def __init__( self , A_ ): '''simple docstring''' super().__init__() _UpperCAmelCase : Any = nn.ModuleList([] ) # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input self.stages.append( ResNetStage( A_ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) ) _UpperCAmelCase : Union[str, Any] = zip(config.hidden_sizes , config.hidden_sizes[1:] ) for (in_channels, out_channels), depth in zip(A_ , config.depths[1:] ): self.stages.append(ResNetStage(A_ , A_ , A_ , depth=A_ ) ) def _UpperCAmelCase ( self , A_ , A_ = False , A_ = True ): '''simple docstring''' 
_UpperCAmelCase : List[Any] = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: _UpperCAmelCase : Dict = hidden_states + (hidden_state,) _UpperCAmelCase : str = stage_module(A_ ) if output_hidden_states: _UpperCAmelCase : int = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return BaseModelOutputWithNoAttention( last_hidden_state=A_ , hidden_states=A_ , ) class a ( UpperCAmelCase ): _lowercase = ResNetConfig _lowercase = "resnet" _lowercase = "pixel_values" _lowercase = True def _UpperCAmelCase ( self , A_ ): '''simple docstring''' if isinstance(A_ , nn.Convad ): nn.init.kaiming_normal_(module.weight , mode="fan_out" , nonlinearity="relu" ) elif isinstance(A_ , (nn.BatchNormad, nn.GroupNorm) ): nn.init.constant_(module.weight , 1 ) nn.init.constant_(module.bias , 0 ) def _UpperCAmelCase ( self , A_ , A_=False ): '''simple docstring''' if isinstance(A_ , A_ ): _UpperCAmelCase : Optional[Any] = value SCREAMING_SNAKE_CASE_ = R'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' SCREAMING_SNAKE_CASE_ = R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. 
See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n' @add_start_docstrings( "The bare ResNet model outputting raw features without any specific head on top." , UpperCAmelCase , ) class a ( UpperCAmelCase ): def __init__( self , A_ ): '''simple docstring''' super().__init__(A_ ) _UpperCAmelCase : List[str] = config _UpperCAmelCase : Any = ResNetEmbeddings(A_ ) _UpperCAmelCase : str = ResNetEncoder(A_ ) _UpperCAmelCase : Any = nn.AdaptiveAvgPoolad((1, 1) ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(A_ ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=A_ , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def _UpperCAmelCase ( self , A_ , A_ = None , A_ = None ): '''simple docstring''' _UpperCAmelCase : Optional[int] = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) _UpperCAmelCase : List[str] = return_dict if return_dict is not None else self.config.use_return_dict _UpperCAmelCase : List[Any] = self.embedder(A_ ) _UpperCAmelCase : str = self.encoder( A_ , output_hidden_states=A_ , return_dict=A_ ) _UpperCAmelCase : List[Any] = encoder_outputs[0] _UpperCAmelCase : int = self.pooler(A_ ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=A_ , pooler_output=A_ , hidden_states=encoder_outputs.hidden_states , ) @add_start_docstrings( "\n ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. 
for\n ImageNet.\n " , UpperCAmelCase , ) class a ( UpperCAmelCase ): def __init__( self , A_ ): '''simple docstring''' super().__init__(A_ ) _UpperCAmelCase : Optional[int] = config.num_labels _UpperCAmelCase : str = ResNetModel(A_ ) # classification head _UpperCAmelCase : int = nn.Sequential( nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , ) # initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(A_ ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=A_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def _UpperCAmelCase ( self , A_ = None , A_ = None , A_ = None , A_ = None , ): '''simple docstring''' _UpperCAmelCase : Optional[int] = return_dict if return_dict is not None else self.config.use_return_dict _UpperCAmelCase : Tuple = self.resnet(A_ , output_hidden_states=A_ , return_dict=A_ ) _UpperCAmelCase : Optional[int] = outputs.pooler_output if return_dict else outputs[1] _UpperCAmelCase : int = self.classifier(A_ ) _UpperCAmelCase : Dict = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: _UpperCAmelCase : Optional[Any] = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): _UpperCAmelCase : Optional[Any] = "single_label_classification" else: _UpperCAmelCase : Any = "multi_label_classification" if self.config.problem_type == "regression": _UpperCAmelCase : str = MSELoss() if self.num_labels == 1: _UpperCAmelCase : Any = loss_fct(logits.squeeze() , labels.squeeze() ) else: _UpperCAmelCase : Optional[int] = loss_fct(A_ , A_ ) elif self.config.problem_type == "single_label_classification": _UpperCAmelCase : Any = CrossEntropyLoss() _UpperCAmelCase : Any = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": _UpperCAmelCase : 
Any = BCEWithLogitsLoss() _UpperCAmelCase : Tuple = loss_fct(A_ , A_ ) if not return_dict: _UpperCAmelCase : Any = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return ImageClassifierOutputWithNoAttention(loss=A_ , logits=A_ , hidden_states=outputs.hidden_states ) @add_start_docstrings( "\n ResNet backbone, to be used with frameworks like DETR and MaskFormer.\n " , UpperCAmelCase , ) class a ( UpperCAmelCase , UpperCAmelCase ): def __init__( self , A_ ): '''simple docstring''' super().__init__(A_ ) super()._init_backbone(A_ ) _UpperCAmelCase : Optional[int] = [config.embedding_size] + config.hidden_sizes _UpperCAmelCase : str = ResNetEmbeddings(A_ ) _UpperCAmelCase : List[Any] = ResNetEncoder(A_ ) # initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(A_ ) @replace_return_docstrings(output_type=A_ , config_class=_CONFIG_FOR_DOC ) def _UpperCAmelCase ( self , A_ , A_ = None , A_ = None ): '''simple docstring''' _UpperCAmelCase : Tuple = return_dict if return_dict is not None else self.config.use_return_dict _UpperCAmelCase : Tuple = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) _UpperCAmelCase : Tuple = self.embedder(A_ ) _UpperCAmelCase : Optional[int] = self.encoder(A_ , output_hidden_states=A_ , return_dict=A_ ) _UpperCAmelCase : Optional[int] = outputs.hidden_states _UpperCAmelCase : Any = () for idx, stage in enumerate(self.stage_names ): if stage in self.out_features: feature_maps += (hidden_states[idx],) if not return_dict: _UpperCAmelCase : Union[str, Any] = (feature_maps,) if output_hidden_states: output += (outputs.hidden_states,) return output return BackboneOutput( feature_maps=A_ , hidden_states=outputs.hidden_states if output_hidden_states else None , attentions=A_ , )
189
0
"""simple docstring""" import argparse import json from pathlib import Path import requests import torch from huggingface_hub import cached_download, hf_hub_url from PIL import Image from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor from transformers.utils import logging logging.set_verbosity_info() lowerCAmelCase__ : List[str] = logging.get_logger(__name__) def a_ ( lowerCamelCase ): UpperCAmelCase__ = DPTConfig(embedding_type='hybrid' ) if "large" in checkpoint_url: UpperCAmelCase__ = 1_0_2_4 UpperCAmelCase__ = 4_0_9_6 UpperCAmelCase__ = 2_4 UpperCAmelCase__ = 1_6 UpperCAmelCase__ = [5, 1_1, 1_7, 2_3] UpperCAmelCase__ = [2_5_6, 5_1_2, 1_0_2_4, 1_0_2_4] UpperCAmelCase__ = (1, 3_8_4, 3_8_4) if "nyu" or "midas" in checkpoint_url: UpperCAmelCase__ = 7_6_8 UpperCAmelCase__ = [1, 1, 1, 0.5] UpperCAmelCase__ = [2_5_6, 5_1_2, 7_6_8, 7_6_8] UpperCAmelCase__ = 1_5_0 UpperCAmelCase__ = 1_6 UpperCAmelCase__ = (1, 3_8_4, 3_8_4) UpperCAmelCase__ = False UpperCAmelCase__ = 'project' if "ade" in checkpoint_url: UpperCAmelCase__ = True UpperCAmelCase__ = 7_6_8 UpperCAmelCase__ = [1, 1, 1, 0.5] UpperCAmelCase__ = 1_5_0 UpperCAmelCase__ = 1_6 UpperCAmelCase__ = 'huggingface/label-files' UpperCAmelCase__ = 'ade20k-id2label.json' UpperCAmelCase__ = json.load(open(cached_download(hf_hub_url(lowerCamelCase , lowerCamelCase , repo_type='dataset' ) ) , 'r' ) ) UpperCAmelCase__ = {int(lowerCamelCase ): v for k, v in idalabel.items()} UpperCAmelCase__ = idalabel UpperCAmelCase__ = {v: k for k, v in idalabel.items()} UpperCAmelCase__ = [1, 1_5_0, 4_8_0, 4_8_0] return config, expected_shape def a_ ( lowerCamelCase ): UpperCAmelCase__ = ['pretrained.model.head.weight', 'pretrained.model.head.bias'] for k in ignore_keys: state_dict.pop(lowerCamelCase , lowerCamelCase ) def a_ ( lowerCamelCase ): if ( "pretrained.model" in name and "cls_token" not in name and "pos_embed" not in name and "patch_embed" not in name ): UpperCAmelCase__ = 
name.replace('pretrained.model' , 'dpt.encoder' ) if "pretrained.model" in name: UpperCAmelCase__ = name.replace('pretrained.model' , 'dpt.embeddings' ) if "patch_embed" in name: UpperCAmelCase__ = name.replace('patch_embed' , '' ) if "pos_embed" in name: UpperCAmelCase__ = name.replace('pos_embed' , 'position_embeddings' ) if "attn.proj" in name: UpperCAmelCase__ = name.replace('attn.proj' , 'attention.output.dense' ) if "proj" in name and "project" not in name: UpperCAmelCase__ = name.replace('proj' , 'projection' ) if "blocks" in name: UpperCAmelCase__ = name.replace('blocks' , 'layer' ) if "mlp.fc1" in name: UpperCAmelCase__ = name.replace('mlp.fc1' , 'intermediate.dense' ) if "mlp.fc2" in name: UpperCAmelCase__ = name.replace('mlp.fc2' , 'output.dense' ) if "norm1" in name and "backbone" not in name: UpperCAmelCase__ = name.replace('norm1' , 'layernorm_before' ) if "norm2" in name and "backbone" not in name: UpperCAmelCase__ = name.replace('norm2' , 'layernorm_after' ) if "scratch.output_conv" in name: UpperCAmelCase__ = name.replace('scratch.output_conv' , 'head' ) if "scratch" in name: UpperCAmelCase__ = name.replace('scratch' , 'neck' ) if "layer1_rn" in name: UpperCAmelCase__ = name.replace('layer1_rn' , 'convs.0' ) if "layer2_rn" in name: UpperCAmelCase__ = name.replace('layer2_rn' , 'convs.1' ) if "layer3_rn" in name: UpperCAmelCase__ = name.replace('layer3_rn' , 'convs.2' ) if "layer4_rn" in name: UpperCAmelCase__ = name.replace('layer4_rn' , 'convs.3' ) if "refinenet" in name: UpperCAmelCase__ = int(name[len('neck.refinenet' ) : len('neck.refinenet' ) + 1] ) # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3 UpperCAmelCase__ = name.replace(f'''refinenet{layer_idx}''' , f'''fusion_stage.layers.{abs(layer_idx-4 )}''' ) if "out_conv" in name: UpperCAmelCase__ = name.replace('out_conv' , 'projection' ) if "resConfUnit1" in name: UpperCAmelCase__ = name.replace('resConfUnit1' , 'residual_layer1' ) if "resConfUnit2" in name: UpperCAmelCase__ = 
name.replace('resConfUnit2' , 'residual_layer2' ) if "conv1" in name: UpperCAmelCase__ = name.replace('conv1' , 'convolution1' ) if "conv2" in name: UpperCAmelCase__ = name.replace('conv2' , 'convolution2' ) # readout blocks if "pretrained.act_postprocess1.0.project.0" in name: UpperCAmelCase__ = name.replace('pretrained.act_postprocess1.0.project.0' , 'neck.reassemble_stage.readout_projects.0.0' ) if "pretrained.act_postprocess2.0.project.0" in name: UpperCAmelCase__ = name.replace('pretrained.act_postprocess2.0.project.0' , 'neck.reassemble_stage.readout_projects.1.0' ) if "pretrained.act_postprocess3.0.project.0" in name: UpperCAmelCase__ = name.replace('pretrained.act_postprocess3.0.project.0' , 'neck.reassemble_stage.readout_projects.2.0' ) if "pretrained.act_postprocess4.0.project.0" in name: UpperCAmelCase__ = name.replace('pretrained.act_postprocess4.0.project.0' , 'neck.reassemble_stage.readout_projects.3.0' ) # resize blocks if "pretrained.act_postprocess1.3" in name: UpperCAmelCase__ = name.replace('pretrained.act_postprocess1.3' , 'neck.reassemble_stage.layers.0.projection' ) if "pretrained.act_postprocess1.4" in name: UpperCAmelCase__ = name.replace('pretrained.act_postprocess1.4' , 'neck.reassemble_stage.layers.0.resize' ) if "pretrained.act_postprocess2.3" in name: UpperCAmelCase__ = name.replace('pretrained.act_postprocess2.3' , 'neck.reassemble_stage.layers.1.projection' ) if "pretrained.act_postprocess2.4" in name: UpperCAmelCase__ = name.replace('pretrained.act_postprocess2.4' , 'neck.reassemble_stage.layers.1.resize' ) if "pretrained.act_postprocess3.3" in name: UpperCAmelCase__ = name.replace('pretrained.act_postprocess3.3' , 'neck.reassemble_stage.layers.2.projection' ) if "pretrained.act_postprocess4.3" in name: UpperCAmelCase__ = name.replace('pretrained.act_postprocess4.3' , 'neck.reassemble_stage.layers.3.projection' ) if "pretrained.act_postprocess4.4" in name: UpperCAmelCase__ = name.replace('pretrained.act_postprocess4.4' , 
'neck.reassemble_stage.layers.3.resize' ) if "pretrained" in name: UpperCAmelCase__ = name.replace('pretrained' , 'dpt' ) if "bn" in name: UpperCAmelCase__ = name.replace('bn' , 'batch_norm' ) if "head" in name: UpperCAmelCase__ = name.replace('head' , 'head.head' ) if "encoder.norm" in name: UpperCAmelCase__ = name.replace('encoder.norm' , 'layernorm' ) if "auxlayer" in name: UpperCAmelCase__ = name.replace('auxlayer' , 'auxiliary_head.head' ) if "backbone" in name: UpperCAmelCase__ = name.replace('backbone' , 'backbone.bit.encoder' ) if ".." in name: UpperCAmelCase__ = name.replace('..' , '.' ) if "stem.conv" in name: UpperCAmelCase__ = name.replace('stem.conv' , 'bit.embedder.convolution' ) if "blocks" in name: UpperCAmelCase__ = name.replace('blocks' , 'layers' ) if "convolution" in name and "backbone" in name: UpperCAmelCase__ = name.replace('convolution' , 'conv' ) if "layer" in name and "backbone" in name: UpperCAmelCase__ = name.replace('layer' , 'layers' ) if "backbone.bit.encoder.bit" in name: UpperCAmelCase__ = name.replace('backbone.bit.encoder.bit' , 'backbone.bit' ) if "embedder.conv" in name: UpperCAmelCase__ = name.replace('embedder.conv' , 'embedder.convolution' ) if "backbone.bit.encoder.stem.norm" in name: UpperCAmelCase__ = name.replace('backbone.bit.encoder.stem.norm' , 'backbone.bit.embedder.norm' ) return name def a_ ( lowerCamelCase , lowerCamelCase ): for i in range(config.num_hidden_layers ): # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) UpperCAmelCase__ = state_dict.pop(f'''dpt.encoder.layer.{i}.attn.qkv.weight''' ) UpperCAmelCase__ = state_dict.pop(f'''dpt.encoder.layer.{i}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict UpperCAmelCase__ = in_proj_weight[: config.hidden_size, :] UpperCAmelCase__ = in_proj_bias[: config.hidden_size] UpperCAmelCase__ = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] UpperCAmelCase__ = in_proj_bias[ 
config.hidden_size : config.hidden_size * 2 ] UpperCAmelCase__ = in_proj_weight[ -config.hidden_size :, : ] UpperCAmelCase__ = in_proj_bias[-config.hidden_size :] def a_ ( ): UpperCAmelCase__ = 'http://images.cocodataset.org/val2017/000000039769.jpg' UpperCAmelCase__ = Image.open(requests.get(lowerCamelCase , stream=lowerCamelCase ).raw ) return im @torch.no_grad() def a_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ): UpperCAmelCase__ , UpperCAmelCase__ = get_dpt_config(lowerCamelCase ) # load original state_dict from URL # state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu") UpperCAmelCase__ = torch.load(lowerCamelCase , map_location='cpu' ) # remove certain keys remove_ignore_keys_(lowerCamelCase ) # rename keys for key in state_dict.copy().keys(): UpperCAmelCase__ = state_dict.pop(lowerCamelCase ) UpperCAmelCase__ = val # read in qkv matrices read_in_q_k_v(lowerCamelCase , lowerCamelCase ) # load HuggingFace model UpperCAmelCase__ = DPTForSemanticSegmentation(lowerCamelCase ) if 'ade' in checkpoint_url else DPTForDepthEstimation(lowerCamelCase ) model.load_state_dict(lowerCamelCase ) model.eval() # Check outputs on an image UpperCAmelCase__ = 4_8_0 if 'ade' in checkpoint_url else 3_8_4 UpperCAmelCase__ = DPTImageProcessor(size=lowerCamelCase ) UpperCAmelCase__ = prepare_img() UpperCAmelCase__ = image_processor(lowerCamelCase , return_tensors='pt' ) # forward pass UpperCAmelCase__ = model(**lowerCamelCase ).logits if 'ade' in checkpoint_url else model(**lowerCamelCase ).predicted_depth if show_prediction: UpperCAmelCase__ = ( torch.nn.functional.interpolate( outputs.unsqueeze(1 ) , size=(image.size[1], image.size[0]) , mode='bicubic' , align_corners=lowerCamelCase , ) .squeeze() .cpu() .numpy() ) Image.fromarray((prediction / prediction.max()) * 2_5_5 ).show() if pytorch_dump_folder_path is not None: Path(lowerCamelCase ).mkdir(exist_ok=lowerCamelCase ) print(f'''Saving model to 
{pytorch_dump_folder_path}''' ) model.save_pretrained(lowerCamelCase ) print(f'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(lowerCamelCase ) if push_to_hub: model.push_to_hub('ybelkada/dpt-hybrid-midas' ) image_processor.push_to_hub('ybelkada/dpt-hybrid-midas' ) if __name__ == "__main__": lowerCAmelCase__ : Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--checkpoint_url', default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt', type=str, help='URL of the original DPT checkpoint you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=False, help='Path to the output PyTorch model directory.', ) parser.add_argument( '--push_to_hub', action='store_true', ) parser.add_argument( '--model_name', default='dpt-large', type=str, help='Name of the model, in case you\'re pushing to the hub.', ) parser.add_argument( '--show_prediction', action='store_true', ) lowerCAmelCase__ : List[str] = parser.parse_args() convert_dpt_checkpoint( args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction )
98
import os

import torch

from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version


if is_torch_version(">=", FSDP_PYTORCH_VERSION):
    import torch.distributed.checkpoint as dist_cp
    from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
    from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
    from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
    from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType

logger = get_logger(__name__)


def save_fsdp_model(fsdp_plugin, accelerator, model, output_dir, model_index=0):
    """Save an FSDP-wrapped model's weights to `output_dir`.

    The on-disk layout depends on ``fsdp_plugin.state_dict_type``:
    FULL saves a single file from rank 0, LOCAL saves one file per rank,
    SHARDED saves a distributed-checkpoint directory.
    """
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        state_dict = model.state_dict()
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            output_model_file = os.path.join(output_dir, weights_name)
            # Only rank 0 holds the full (gathered) state dict, so only it writes.
            if accelerator.process_index == 0:
                logger.info(f"Saving model to {output_model_file}")
                torch.save(state_dict, output_model_file)
                logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            output_model_file = os.path.join(output_dir, weights_name)
            logger.info(f"Saving model to {output_model_file}")
            torch.save(state_dict, output_model_file)
            logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = os.path.join(output_dir, f"{MODEL_NAME}_{model_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving model to {ckpt_dir}")
            state_dict = {"model": state_dict}
            dist_cp.save_state_dict(
                state_dict=state_dict,
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f"Model saved to {ckpt_dir}")


def load_fsdp_model(fsdp_plugin, accelerator, model, input_dir, model_index=0):
    """Load model weights saved by :func:`save_fsdp_model` back into `model`."""
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            # With rank0-only full state dicts, non-zero ranks rely on
            # `sync_module_states` to receive the weights via broadcast.
            if type(model) != FSDP and accelerator.process_index != 0:
                if not fsdp_plugin.sync_module_states:
                    raise ValueError(
                        "Set the `sync_module_states` flag to `True` so that model states are synced across processes when "
                        "initializing FSDP object"
                    )
                return
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = (
                os.path.join(input_dir, f"{MODEL_NAME}_{model_index}")
                if f"{MODEL_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading model from {ckpt_dir}")
            # dist_cp loads in place into the provided (local) state dict.
            state_dict = {"model": model.state_dict()}
            dist_cp.load_state_dict(
                state_dict=state_dict,
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
                planner=DefaultLoadPlanner(),
            )
            state_dict = state_dict["model"]
            logger.info(f"Model loaded from {ckpt_dir}")
        model.load_state_dict(state_dict)


def save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, output_dir, optimizer_index=0):
    """Save the optimizer state of an FSDP model, mirroring `save_fsdp_model`'s layouts."""
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        optim_state = FSDP.optim_state_dict(model, optimizer)
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if accelerator.process_index == 0:
                optim_state_name = (
                    f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
                )
                output_optimizer_file = os.path.join(output_dir, optim_state_name)
                logger.info(f"Saving Optimizer state to {output_optimizer_file}")
                torch.save(optim_state, output_optimizer_file)
                logger.info(f"Optimizer state saved in {output_optimizer_file}")
        else:
            ckpt_dir = os.path.join(output_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving Optimizer state to {ckpt_dir}")
            dist_cp.save_state_dict(
                state_dict={"optimizer": optim_state},
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f"Optimizer state saved in {ckpt_dir}")


def load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, input_dir, optimizer_index=0):
    """Load optimizer state saved by :func:`save_fsdp_optimizer` back into `optimizer`."""
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            optim_state = None
            # below check should work but currently it isn't working (mostly opytorch issue),
            # in the meantime disabling it at the cost of excess memory usage
            # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
            optimizer_name = (
                f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
            )
            input_optimizer_file = os.path.join(input_dir, optimizer_name)
            logger.info(f"Loading Optimizer state from {input_optimizer_file}")
            optim_state = torch.load(input_optimizer_file)
            logger.info(f"Optimizer state loaded from {input_optimizer_file}")
        else:
            ckpt_dir = (
                os.path.join(input_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
                if f"{OPTIMIZER_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading Optimizer from {ckpt_dir}")
            optim_state = load_sharded_optimizer_state_dict(
                model_state_dict=model.state_dict(),
                optimizer_key="optimizer",
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
            )
            optim_state = optim_state["optimizer"]
            logger.info(f"Optimizer loaded from {ckpt_dir}")
        # Re-flatten the saved state to match FSDP's sharded parameter layout.
        flattened_osd = FSDP.optim_state_dict_to_load(optim_state, model, optimizer)
        optimizer.load_state_dict(flattened_osd)
92
0
"""simple docstring""" import warnings from ...utils import logging from .image_processing_beit import BeitImageProcessor _UpperCamelCase: List[str] = logging.get_logger(__name__) class a__ ( SCREAMING_SNAKE_CASE__ ): def __init__( self : List[str], *lowerCAmelCase : Tuple, **lowerCAmelCase : Union[str, Any] ) -> None: warnings.warn( 'The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please' ' use BeitImageProcessor instead.', lowerCAmelCase, ) super().__init__(*lowerCAmelCase, **lowerCAmelCase )
350
"""simple docstring""" import os from typing import BinaryIO, Optional, Union import numpy as np import pyarrow.parquet as pq from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config from ..features.features import FeatureType, _visit from ..formatting import query_table from ..packaged_modules import _PACKAGED_DATASETS_MODULES from ..packaged_modules.parquet.parquet import Parquet from ..utils import logging from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader def lowercase__ ( _UpperCAmelCase ) -> Optional[int]: '''simple docstring''' lowercase : List[Any] = np.inf def set_batch_size(_UpperCAmelCase ) -> None: nonlocal batch_size if isinstance(_UpperCAmelCase , _UpperCAmelCase ): lowercase : Any = min(_UpperCAmelCase , config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS ) elif isinstance(_UpperCAmelCase , _UpperCAmelCase ): lowercase : Dict = min(_UpperCAmelCase , config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS ) elif isinstance(_UpperCAmelCase , _UpperCAmelCase ) and feature.dtype == "binary": lowercase : int = min(_UpperCAmelCase , config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS ) _visit(_UpperCAmelCase , _UpperCAmelCase ) return None if batch_size is np.inf else batch_size class a__ ( SCREAMING_SNAKE_CASE__ ): def __init__( self : Union[str, Any], lowerCAmelCase : NestedDataStructureLike[PathLike], lowerCAmelCase : Optional[NamedSplit] = None, lowerCAmelCase : Optional[Features] = None, lowerCAmelCase : str = None, lowerCAmelCase : bool = False, lowerCAmelCase : bool = False, lowerCAmelCase : Optional[int] = None, **lowerCAmelCase : int, ) -> List[Any]: super().__init__( lowerCAmelCase, split=lowerCAmelCase, features=lowerCAmelCase, cache_dir=lowerCAmelCase, keep_in_memory=lowerCAmelCase, streaming=lowerCAmelCase, num_proc=lowerCAmelCase, **lowerCAmelCase, ) lowercase : str = path_or_paths if isinstance(lowerCAmelCase, lowerCAmelCase ) else {self.split: path_or_paths} lowercase : Tuple = 
_PACKAGED_DATASETS_MODULES['parquet'][1] lowercase : Optional[int] = Parquet( cache_dir=lowerCAmelCase, data_files=lowerCAmelCase, features=lowerCAmelCase, hash=lowerCAmelCase, **lowerCAmelCase, ) def lowercase ( self : Optional[int] ) -> Union[str, Any]: # Build iterable dataset if self.streaming: lowercase : Union[str, Any] = self.builder.as_streaming_dataset(split=self.split ) # Build regular (map-style) dataset else: lowercase : Tuple = None lowercase : Union[str, Any] = None lowercase : List[Any] = None lowercase : int = None self.builder.download_and_prepare( download_config=lowerCAmelCase, download_mode=lowerCAmelCase, verification_mode=lowerCAmelCase, base_path=lowerCAmelCase, num_proc=self.num_proc, ) lowercase : Any = self.builder.as_dataset( split=self.split, verification_mode=lowerCAmelCase, in_memory=self.keep_in_memory ) return dataset class a__ : def __init__( self : Dict, lowerCAmelCase : Dataset, lowerCAmelCase : Union[PathLike, BinaryIO], lowerCAmelCase : Optional[int] = None, **lowerCAmelCase : Optional[Any], ) -> Optional[Any]: lowercase : List[Any] = dataset lowercase : int = path_or_buf lowercase : Optional[Any] = batch_size or get_writer_batch_size(dataset.features ) lowercase : Optional[Any] = parquet_writer_kwargs def lowercase ( self : Union[str, Any] ) -> int: lowercase : Union[str, Any] = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE if isinstance(self.path_or_buf, (str, bytes, os.PathLike) ): with open(self.path_or_buf, 'wb+' ) as buffer: lowercase : int = self._write(file_obj=lowerCAmelCase, batch_size=lowerCAmelCase, **self.parquet_writer_kwargs ) else: lowercase : List[Any] = self._write(file_obj=self.path_or_buf, batch_size=lowerCAmelCase, **self.parquet_writer_kwargs ) return written def lowercase ( self : int, lowerCAmelCase : BinaryIO, lowerCAmelCase : int, **lowerCAmelCase : Union[str, Any] ) -> int: lowercase : Optional[Any] = 0 lowercase : int = parquet_writer_kwargs.pop('path_or_buf', lowerCAmelCase ) 
lowercase : List[str] = self.dataset.features.arrow_schema lowercase : int = pq.ParquetWriter(lowerCAmelCase, schema=lowerCAmelCase, **lowerCAmelCase ) for offset in logging.tqdm( range(0, len(self.dataset ), lowerCAmelCase ), unit='ba', disable=not logging.is_progress_bar_enabled(), desc='Creating parquet from Arrow format', ): lowercase : Tuple = query_table( table=self.dataset._data, key=slice(lowerCAmelCase, offset + batch_size ), indices=self.dataset._indices if self.dataset._indices is not None else None, ) writer.write_table(lowerCAmelCase ) written += batch.nbytes writer.close() return written
53
0
"""Generate all permutations of a list with Heap's algorithm."""


def heaps(arr):
    """Return every permutation of *arr* as a list of tuples.

    Uses the iterative form of Heap's algorithm; *arr* is permuted in place
    while snapshots are appended as tuples.
    """
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(n, arr):
        # c[i] counts how many swaps have been performed for index i.
        c = [0] * n
        res.append(tuple(arr))
        i = 0
        while i < n:
            if c[i] < i:
                # Bug fix: the original collapsed these swaps into a single
                # assignment, so the array was never actually permuted.
                if i % 2 == 0:
                    arr[0], arr[i] = arr[i], arr[0]
                else:
                    arr[c[i]], arr[i] = arr[i], arr[c[i]]
                res.append(tuple(arr))
                c[i] += 1
                i = 0
            else:
                c[i] = 0
                i += 1

    generate(len(arr), arr)
    return res


if __name__ == "__main__":
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    arr = [int(item) for item in user_input.split(''',''')]
    print(heaps(arr))
37
def euclidean_distance_sqr(point1, point2):
    """Squared Euclidean distance between two 2-D points (avoids sqrt until the end)."""
    return (point1[0] - point2[0]) ** 2 + (point1[1] - point2[1]) ** 2


def column_based_sort(array, column=0):
    """Return *array* sorted by the given coordinate (0 = x, 1 = y)."""
    # Bug fix: the original lambda referenced an undefined name `x`.
    return sorted(array, key=lambda point: point[column])


def dis_between_closest_pair(points, points_counts, min_dis=float("inf")):
    """Brute-force O(n^2) minimum squared distance; used for small subproblems."""
    for i in range(points_counts - 1):
        for j in range(i + 1, points_counts):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def dis_between_closest_in_strip(points, points_counts, min_dis=float("inf")):
    """Minimum squared distance inside the middle strip; each point is only
    compared with its 6 neighbours (enough for points sorted by y)."""
    for i in range(min(6, points_counts - 1), points_counts):
        for j in range(max(0, i - 6), i):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts):
    """Divide-and-conquer closest pair; returns the SQUARED distance."""
    # base case
    if points_counts <= 3:
        return dis_between_closest_pair(points_sorted_on_x, points_counts)

    # recursion
    mid = points_counts // 2
    closest_in_left = closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y[:mid], mid)
    closest_in_right = closest_pair_of_points_sqr(
        points_sorted_on_y, points_sorted_on_y[mid:], points_counts - mid
    )
    closest_pair_dis = min(closest_in_left, closest_in_right)

    # Points within `closest_pair_dis` of the dividing line may straddle it.
    cross_strip = []
    for point in points_sorted_on_x:
        if abs(point[0] - points_sorted_on_x[mid][0]) < closest_pair_dis:
            cross_strip.append(point)
    closest_in_strip = dis_between_closest_in_strip(cross_strip, len(cross_strip), closest_pair_dis)
    return min(closest_pair_dis, closest_in_strip)


def closest_pair_of_points(points, points_counts):
    """Return the (non-squared) distance between the closest pair of 2-D points."""
    points_sorted_on_x = column_based_sort(points, column=0)
    points_sorted_on_y = column_based_sort(points, column=1)
    return (closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts)) ** 0.5


if __name__ == "__main__":
    points = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
    print('''Distance:''', closest_pair_of_points(points, len(points)))
300
0
"""Compare a naive and a two-pointer solution for the 3-sum problem."""
from itertools import permutations
from random import randint
from timeit import repeat


def make_dataset() -> tuple:
    """Return (random 10-element array, random target) for benchmarking."""
    arr = [randint(-1000, 1000) for i in range(10)]
    r = randint(-5000, 5000)
    return (arr, r)


# Module-level fixture: the timeit setup below imports it from __main__.
dataset = make_dataset()


def triplet_sum1(arr: list, target: int) -> tuple:
    """Naive O(n^3): try every 3-permutation; (0, 0, 0) if no triplet sums to target."""
    for triplet in permutations(arr, 3):
        if sum(triplet) == target:
            return tuple(sorted(triplet))
    return (0, 0, 0)


def triplet_sum2(arr: list, target: int) -> tuple:
    """Sorted two-pointer O(n^2) solution; (0, 0, 0) if no triplet sums to target.

    Note: sorts *arr* in place.
    """
    arr.sort()
    n = len(arr)
    for i in range(n - 1):
        # Bug fix: the original assigned both pointers to the same variable.
        left, right = i + 1, n - 1
        while left < right:
            if arr[i] + arr[left] + arr[right] == target:
                return (arr[i], arr[left], arr[right])
            elif arr[i] + arr[left] + arr[right] < target:
                left += 1
            elif arr[i] + arr[left] + arr[right] > target:
                right -= 1
    return (0, 0, 0)


def solution_times() -> tuple:
    """Benchmark both implementations on the shared dataset; return best-of-5 times."""
    setup_code = """
from __main__ import dataset, triplet_sum1, triplet_sum2
"""
    test_code1 = """
triplet_sum1(*dataset)
"""
    test_code2 = """
triplet_sum2(*dataset)
"""
    times1 = repeat(setup=setup_code, stmt=test_code1, repeat=5, number=10000)
    times2 = repeat(setup=setup_code, stmt=test_code2, repeat=5, number=10000)
    return (min(times1), min(times2))


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    times = solution_times()
    print(f"""The time for naive implementation is {times[0]}.""")
    print(f"""The time for optimized implementation is {times[1]}.""")
101
"""Closed-form summation puzzle solution."""


def solution(n: int = 1000) -> int:
    """Return sum of 2*a*floor((a-1)/2) for a in 3..n.

    Renamed from a mangled identifier so the __main__ call below resolves.
    """
    return sum(2 * a * ((a - 1) // 2) for a in range(3, n + 1))


if __name__ == "__main__":
    print(solution())
101
1
import logging

import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed


class ConstantLengthDataset(IterableDataset):
    """Stream fixed-length token sequences built by concatenating text examples.

    Examples are buffered until roughly `seq_length * chars_per_token *
    num_of_sequences` characters are collected, tokenized in one batch, joined
    with the BOS token, and yielded as `seq_length`-long tensors.
    """

    def __init__(self, tokenizer, dataset, seq_length=1024, num_of_sequences=1024, chars_per_token=3.6):
        self.tokenizer = tokenizer
        self.concat_token_id = tokenizer.bos_token_id
        self.dataset = dataset
        self.seq_length = seq_length
        self.input_characters = seq_length * chars_per_token * num_of_sequences

    def __iter__(self):
        iterator = iter(self.dataset)
        more_examples = True
        while more_examples:
            buffer, buffer_len = [], 0
            while True:
                if buffer_len >= self.input_characters:
                    break
                try:
                    buffer.append(next(iterator)["content"])
                    buffer_len += len(buffer[-1])
                except StopIteration:
                    more_examples = False
                    break
            tokenized_inputs = self.tokenizer(buffer, truncation=False)['input_ids']
            all_token_ids = []
            for tokenized_input in tokenized_inputs:
                all_token_ids.extend(tokenized_input + [self.concat_token_id])
            # Emit only complete sequences; the ragged tail is dropped.
            for i in range(0, len(all_token_ids), self.seq_length):
                input_ids = all_token_ids[i : i + self.seq_length]
                if len(input_ids) == self.seq_length:
                    yield torch.tensor(input_ids)


def create_dataloader(args):
    """Build the streaming evaluation dataloader (uses the module-level tokenizer)."""
    ds_kwargs = {'streaming': True}
    valid_data = load_dataset(args.dataset_name, split="train", **ds_kwargs)
    valid_dataset = ConstantLengthDataset(tokenizer, valid_data, seq_length=args.seq_length)
    eval_dataloader = DataLoader(valid_dataset, batch_size=args.batch_size)
    return eval_dataloader


def evaluate(args):
    """Compute mean eval loss and perplexity over the prepared dataloader."""
    model.eval()
    losses = []
    for step, batch in enumerate(eval_dataloader):
        with torch.no_grad():
            outputs = model(batch, labels=batch)
        loss = outputs.loss.repeat(args.batch_size)
        losses.append(accelerator.gather(loss))
        if args.max_eval_steps > 0 and step >= args.max_eval_steps:
            break
    loss = torch.mean(torch.cat(losses))
    try:
        perplexity = torch.exp(loss)
    except OverflowError:
        perplexity = float("""inf""")
    return loss.item(), perplexity.item()


# Setup Accelerator
accelerator = Accelerator()

# Parse configuration
parser = HfArgumentParser(EvaluationArguments)
args = parser.parse_args()
set_seed(args.seed)

# Logging
logger = logging.getLogger(__name__)
logging.basicConfig(
    format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''',
    datefmt='''%m/%d/%Y %H:%M:%S''',
    level=logging.INFO,
)

# Load model and tokenizer
model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)

# Load dataset and dataloader
eval_dataloader = create_dataloader(args)

# Prepare everything with our `accelerator`.
model, eval_dataloader = accelerator.prepare(model, eval_dataloader)

# Evaluate and save the last checkpoint
logger.info('''Evaluating and saving model after training''')
eval_loss, perplexity = evaluate(args)
logger.info(f"loss/eval: {eval_loss}, perplexity: {perplexity}")
306
import copy
from typing import Dict, List, Optional

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING


MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """facebook/mask2former-swin-small-coco-instance""": (
        """https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"""
    )
    # See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}

logger = logging.get_logger(__name__)


class A_(PretrainedConfig):
    """Configuration for Mask2Former (backbone config + transformer decoder + loss weights).

    Fixes vs. the mangled original: the base class and the duplicated
    parameter names (a SyntaxError) are restored, as are the two method names.
    """

    model_type = '''mask2former'''
    backbones_supported = ['''swin''']
    attribute_map = {'''hidden_size''': '''hidden_dim'''}

    def __init__(
        self,
        backbone_config: Optional[Dict] = None,
        feature_size: int = 256,
        mask_feature_size: int = 256,
        hidden_dim: int = 256,
        encoder_feedforward_dim: int = 1024,
        activation_function: str = "relu",
        encoder_layers: int = 6,
        decoder_layers: int = 10,
        num_attention_heads: int = 8,
        dropout: float = 0.0,
        dim_feedforward: int = 2048,
        pre_norm: bool = False,
        enforce_input_projection: bool = False,
        common_stride: int = 4,
        ignore_value: int = 255,
        num_queries: int = 100,
        no_object_weight: float = 0.1,
        class_weight: float = 2.0,
        mask_weight: float = 5.0,
        dice_weight: float = 5.0,
        train_num_points: int = 12544,
        oversample_ratio: float = 3.0,
        importance_sample_ratio: float = 0.75,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        use_auxiliary_loss: bool = True,
        feature_strides: List[int] = [4, 8, 16, 32],
        output_auxiliary_logits: bool = None,
        **kwargs,
    ):
        if backbone_config is None:
            logger.info('`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.')
            backbone_config = CONFIG_MAPPING['swin'](
                image_size=224,
                in_channels=3,
                patch_size=4,
                embed_dim=96,
                depths=[2, 2, 18, 2],
                num_heads=[3, 6, 12, 24],
                window_size=7,
                drop_path_rate=0.3,
                use_absolute_embeddings=False,
                out_features=['stage1', 'stage2', 'stage3', 'stage4'],
            )

        # Allow passing the backbone config as a plain dict.
        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop('model_type')
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. "
                f"Supported model types: {','.join(self.backbones_supported)}"
            )

        self.backbone_config = backbone_config
        self.feature_size = feature_size
        self.mask_feature_size = mask_feature_size
        self.hidden_dim = hidden_dim
        self.encoder_feedforward_dim = encoder_feedforward_dim
        self.activation_function = activation_function
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.num_attention_heads = num_attention_heads
        self.dropout = dropout
        self.dim_feedforward = dim_feedforward
        self.pre_norm = pre_norm
        self.enforce_input_projection = enforce_input_projection
        self.common_stride = common_stride
        self.ignore_value = ignore_value
        self.num_queries = num_queries
        self.no_object_weight = no_object_weight
        self.class_weight = class_weight
        self.mask_weight = mask_weight
        self.dice_weight = dice_weight
        self.train_num_points = train_num_points
        self.oversample_ratio = oversample_ratio
        self.importance_sample_ratio = importance_sample_ratio
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.use_auxiliary_loss = use_auxiliary_loss
        self.feature_strides = feature_strides
        self.output_auxiliary_logits = output_auxiliary_logits
        # Decoder depth doubles as the generic `num_hidden_layers` attribute.
        self.num_hidden_layers = decoder_layers

        super().__init__(**kwargs)

    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
        """Instantiate from an existing backbone config."""
        return cls(
            backbone_config=backbone_config,
            **kwargs,
        )

    def to_dict(self):
        """Serialize to a plain dict, nesting the backbone config."""
        output = copy.deepcopy(self.__dict__)
        output['backbone_config'] = self.backbone_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
73
0
"""Lazy-import scaffold for the Time Series Transformer model."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    '''configuration_time_series_transformer''': [
        '''TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''TimeSeriesTransformerConfig''',
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Bug fix: the original reassigned the whole dict to a plain list,
    # destroying the configuration entry and breaking _LazyModule.
    _import_structure['''modeling_time_series_transformer'''] = [
        '''TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TimeSeriesTransformerForPrediction''',
        '''TimeSeriesTransformerModel''',
        '''TimeSeriesTransformerPreTrainedModel''',
    ]

if TYPE_CHECKING:
    from .configuration_time_series_transformer import (
        TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TimeSeriesTransformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_time_series_transformer import (
            TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimeSeriesTransformerForPrediction,
            TimeSeriesTransformerModel,
            TimeSeriesTransformerPreTrainedModel,
        )

else:
    import sys

    # Bug fix: the lazy module must replace this module in sys.modules.
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
357
"""Convert original RWKV checkpoints to the Transformers format."""
import argparse
import gc
import json
import os
import re

import torch
from huggingface_hub import hf_hub_download

from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint


# Model-size -> architecture hyperparameters (both referenced by name below;
# the mangled original bound them to the same identifier).
NUM_HIDDEN_LAYERS_MAPPING = {
    '''169M''': 12,
    '''430M''': 24,
    '''1B5''': 24,
    '''3B''': 32,
    '''7B''': 32,
    '''14B''': 40,
}

HIDEN_SIZE_MAPPING = {
    '''169M''': 768,
    '''430M''': 1024,
    '''1B5''': 2048,
    '''3B''': 2560,
    '''7B''': 4096,
    '''14B''': 5120,
}


def convert_state_dict(state_dict):
    """Rename original RWKV checkpoint keys to the Transformers naming scheme."""
    state_dict_keys = list(state_dict.keys())
    for name in state_dict_keys:
        weight = state_dict.pop(name)
        # emb -> embedding
        if name.startswith('''emb.'''):
            name = name.replace('''emb.''', '''embeddings.''')
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith('''blocks.0.ln0'''):
            name = name.replace('''blocks.0.ln0''', '''blocks.0.pre_ln''')
        # att -> attention
        name = re.sub(r'''blocks\.(\d+)\.att''', r'''blocks.\1.attention''', name)
        # ffn -> feed_forward
        name = re.sub(r'''blocks\.(\d+)\.ffn''', r'''blocks.\1.feed_forward''', name)
        # time_mix_k -> time_mix_key and reshape
        if name.endswith('''.time_mix_k'''):
            name = name.replace('''.time_mix_k''', '''.time_mix_key''')
        # time_mix_v -> time_mix_value and reshape
        if name.endswith('''.time_mix_v'''):
            name = name.replace('''.time_mix_v''', '''.time_mix_value''')
        # time_mix_r -> time_mix_key and reshape
        if name.endswith('''.time_mix_r'''):
            name = name.replace('''.time_mix_r''', '''.time_mix_receptance''')
        if name != "head.weight":
            name = '''rwkv.''' + name
        state_dict[name] = weight
    return state_dict


def convert_rmkv_checkpoint_to_hf_format(
    repo_id, checkpoint_file, output_dir, size=None, tokenizer_file=None, push_to_hub=False, model_name=None
):
    """Download an RWKV checkpoint from the Hub, convert and shard it into `output_dir`."""
    # 1. If possible, build the tokenizer.
    if tokenizer_file is None:
        print('''No `--tokenizer_file` provided, we will use the default tokenizer.''')
        vocab_size = 50277
        tokenizer = AutoTokenizer.from_pretrained('''EleutherAI/gpt-neox-20b''')
    else:
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file)
        vocab_size = len(tokenizer)
    tokenizer.save_pretrained(output_dir)

    # 2. Build the config
    possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys())
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size = candidate
                break
        if size is None:
            raise ValueError('''Could not infer the size, please provide it with the `--size` argument.''')
    if size not in possible_sizes:
        raise ValueError(f"`size` should be one of {possible_sizes}, got {size}.")

    config = RwkvConfig(
        vocab_size=vocab_size,
        num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size],
        hidden_size=HIDEN_SIZE_MAPPING[size],
    )
    config.save_pretrained(output_dir)

    # 3. Download model file then convert state_dict
    model_file = hf_hub_download(repo_id, checkpoint_file)
    state_dict = torch.load(model_file, map_location='''cpu''')
    state_dict = convert_state_dict(state_dict)

    # 4. Split in shards and save
    shards, index = shard_checkpoint(state_dict)
    for shard_file, shard in shards.items():
        torch.save(shard, os.path.join(output_dir, shard_file))

    if index is not None:
        save_index_file = os.path.join(output_dir, WEIGHTS_INDEX_NAME)
        # Save the index as well
        with open(save_index_file, '''w''', encoding='''utf-8''') as f:
            content = json.dumps(index, indent=2, sort_keys=True) + '''\n'''
            f.write(content)

    # 5. Clean up shards (for some reason the file PyTorch saves take the same space as the whole state_dict
    print(
        '''Cleaning up shards. This may error with an OOM error, it this is the case don\'t worry you still have converted the model.'''
    )
    shard_files = list(shards.keys())

    del state_dict
    del shards
    gc.collect()

    # Re-save each shard with tensors moved to CPU and detached from storage sharing.
    for shard_file in shard_files:
        state_dict = torch.load(os.path.join(output_dir, shard_file))
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()}, os.path.join(output_dir, shard_file))

    del state_dict
    gc.collect()

    if push_to_hub:
        if model_name is None:
            raise ValueError('''Please provide a `model_name` to push the model to the Hub.''')
        model = AutoModelForCausalLM.from_pretrained(output_dir)
        model.push_to_hub(model_name, max_shard_size='''2GB''')
        tokenizer.push_to_hub(model_name)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--repo_id''', default=None, type=str, required=True, help='''Repo ID from which to pull the checkpoint.'''
    )
    parser.add_argument(
        '''--checkpoint_file''', default=None, type=str, required=True, help='''Name of the checkpoint file in the repo.'''
    )
    parser.add_argument(
        '''--output_dir''', default=None, type=str, required=True, help='''Where to save the converted model.'''
    )
    parser.add_argument(
        '''--tokenizer_file''',
        default=None,
        type=str,
        help='''Path to the tokenizer file to use (if not provided, only the model is converted).''',
    )
    parser.add_argument(
        '''--size''',
        default=None,
        type=str,
        help='''Size of the model. Will be inferred from the `checkpoint_file` if not passed.''',
    )
    parser.add_argument(
        '''--push_to_hub''',
        action='''store_true''',
        help='''Push to the Hub the converted model.''',
    )
    parser.add_argument(
        '''--model_name''',
        default=None,
        type=str,
        help='''Name of the pushed model on the Hub, including the username / organization.''',
    )

    args = parser.parse_args()
    convert_rmkv_checkpoint_to_hf_format(
        args.repo_id,
        args.checkpoint_file,
        args.output_dir,
        size=args.size,
        tokenizer_file=args.tokenizer_file,
        push_to_hub=args.push_to_hub,
        model_name=args.model_name,
    )
217
0
from ...configuration_utils import PretrainedConfig from ...utils import logging _UpperCAmelCase : Any = logging.get_logger(__name__) _UpperCAmelCase : str = { """facebook/s2t-small-librispeech-asr""": ( """https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json""" ), # See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text } class lowerCAmelCase ( __UpperCamelCase ): UpperCAmelCase__ = """speech_to_text""" UpperCAmelCase__ = ["""past_key_values"""] UpperCAmelCase__ = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""} def __init__( self : List[Any] , UpperCAmelCase : str=10000 , UpperCAmelCase : int=12 , UpperCAmelCase : Optional[int]=2048 , UpperCAmelCase : Optional[int]=4 , UpperCAmelCase : List[Any]=6 , UpperCAmelCase : Dict=2048 , UpperCAmelCase : List[str]=4 , UpperCAmelCase : Optional[int]=0.0 , UpperCAmelCase : Optional[int]=0.0 , UpperCAmelCase : Tuple=True , UpperCAmelCase : Dict=True , UpperCAmelCase : List[str]="relu" , UpperCAmelCase : Any=256 , UpperCAmelCase : int=0.1 , UpperCAmelCase : Tuple=0.0 , UpperCAmelCase : List[str]=0.0 , UpperCAmelCase : Tuple=0.0_2 , UpperCAmelCase : Any=2 , UpperCAmelCase : Optional[Any]=True , UpperCAmelCase : int=1 , UpperCAmelCase : int=0 , UpperCAmelCase : int=2 , UpperCAmelCase : int=6000 , UpperCAmelCase : Optional[Any]=1024 , UpperCAmelCase : Any=2 , UpperCAmelCase : int=(5, 5) , UpperCAmelCase : int=1024 , UpperCAmelCase : Optional[int]=80 , UpperCAmelCase : Tuple=1 , **UpperCAmelCase : Dict , ) -> Any: lowerCamelCase__ : Optional[int] = vocab_size lowerCamelCase__ : List[str] = d_model lowerCamelCase__ : int = encoder_ffn_dim lowerCamelCase__ : Optional[Any] = encoder_layers lowerCamelCase__ : Tuple = encoder_attention_heads lowerCamelCase__ : str = decoder_ffn_dim lowerCamelCase__ : Optional[Any] = decoder_layers lowerCamelCase__ : Dict = decoder_attention_heads lowerCamelCase__ : int = dropout lowerCamelCase__ : str = 
attention_dropout lowerCamelCase__ : Dict = activation_dropout lowerCamelCase__ : int = activation_function lowerCamelCase__ : Tuple = init_std lowerCamelCase__ : Optional[int] = encoder_layerdrop lowerCamelCase__ : Dict = decoder_layerdrop lowerCamelCase__ : Union[str, Any] = use_cache lowerCamelCase__ : Optional[Any] = encoder_layers lowerCamelCase__ : Any = scale_embedding # scale factor will be sqrt(d_model) if True lowerCamelCase__ : Optional[int] = max_source_positions lowerCamelCase__ : Optional[int] = max_target_positions lowerCamelCase__ : int = num_conv_layers lowerCamelCase__ : Union[str, Any] = list(UpperCAmelCase ) lowerCamelCase__ : int = conv_channels lowerCamelCase__ : Dict = input_feat_per_channel lowerCamelCase__ : int = input_channels if len(self.conv_kernel_sizes ) != self.num_conv_layers: raise ValueError( 'Configuration for convolutional module is incorrect. ' 'It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` ' F"""but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes )}`, """ F"""`config.num_conv_layers = {self.num_conv_layers}`.""" ) super().__init__( pad_token_id=UpperCAmelCase , bos_token_id=UpperCAmelCase , eos_token_id=UpperCAmelCase , is_encoder_decoder=UpperCAmelCase , decoder_start_token_id=UpperCAmelCase , **UpperCAmelCase , )
50
import argparse import json from collections import OrderedDict import torch from huggingface_hub import cached_download, hf_hub_url from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> int: lowerCamelCase__ : Optional[int] = [] embed.append( ( F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight""", F"""stage{idx}.patch_embed.proj.weight""", ) ) embed.append( ( F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias""", F"""stage{idx}.patch_embed.proj.bias""", ) ) embed.append( ( F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight""", F"""stage{idx}.patch_embed.norm.weight""", ) ) embed.append( ( F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias""", F"""stage{idx}.patch_embed.norm.bias""", ) ) return embed def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase ) -> Tuple: lowerCamelCase__ : Tuple = [] attention_weights.append( ( F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight""", F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight""", ) ) attention_weights.append( ( F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight""", F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight""", ) ) attention_weights.append( ( F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias""", F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias""", ) ) attention_weights.append( ( F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean""", F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean""", ) ) attention_weights.append( ( 
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var""", F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var""", ) ) attention_weights.append( ( F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked""", F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked""", ) ) attention_weights.append( ( F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight""", F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight""", ) ) attention_weights.append( ( F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight""", F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight""", ) ) attention_weights.append( ( F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias""", F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias""", ) ) attention_weights.append( ( F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean""", F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean""", ) ) attention_weights.append( ( F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var""", F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var""", ) ) attention_weights.append( ( F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked""", F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked""", ) ) attention_weights.append( ( 
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight""", F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight""", ) ) attention_weights.append( ( F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight""", F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight""", ) ) attention_weights.append( ( F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias""", F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias""", ) ) attention_weights.append( ( F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean""", F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean""", ) ) attention_weights.append( ( F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var""", F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var""", ) ) attention_weights.append( ( F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked""", F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked""", ) ) attention_weights.append( ( F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight""", F"""stage{idx}.blocks.{cnt}.attn.proj_q.weight""", ) ) attention_weights.append( ( F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias""", F"""stage{idx}.blocks.{cnt}.attn.proj_q.bias""", ) ) attention_weights.append( ( F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight""", F"""stage{idx}.blocks.{cnt}.attn.proj_k.weight""", ) ) attention_weights.append( ( 
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias""", F"""stage{idx}.blocks.{cnt}.attn.proj_k.bias""", ) ) attention_weights.append( ( F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight""", F"""stage{idx}.blocks.{cnt}.attn.proj_v.weight""", ) ) attention_weights.append( ( F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias""", F"""stage{idx}.blocks.{cnt}.attn.proj_v.bias""", ) ) attention_weights.append( ( F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight""", F"""stage{idx}.blocks.{cnt}.attn.proj.weight""", ) ) attention_weights.append( ( F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias""", F"""stage{idx}.blocks.{cnt}.attn.proj.bias""", ) ) attention_weights.append( (F"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight""", F"""stage{idx}.blocks.{cnt}.mlp.fc1.weight""") ) attention_weights.append( (F"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias""", F"""stage{idx}.blocks.{cnt}.mlp.fc1.bias""") ) attention_weights.append( (F"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight""", F"""stage{idx}.blocks.{cnt}.mlp.fc2.weight""") ) attention_weights.append( (F"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias""", F"""stage{idx}.blocks.{cnt}.mlp.fc2.bias""") ) attention_weights.append( (F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight""", F"""stage{idx}.blocks.{cnt}.norm1.weight""") ) attention_weights.append( (F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias""", F"""stage{idx}.blocks.{cnt}.norm1.bias""") ) attention_weights.append( (F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight""", F"""stage{idx}.blocks.{cnt}.norm2.weight""") ) attention_weights.append( (F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias""", F"""stage{idx}.blocks.{cnt}.norm2.bias""") ) return attention_weights def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) 
-> Tuple: lowerCamelCase__ : Union[str, Any] = [] token.append((F"""cvt.encoder.stages.{idx}.cls_token""", 'stage2.cls_token') ) return token def SCREAMING_SNAKE_CASE ( ) -> str: lowerCamelCase__ : str = [] head.append(('layernorm.weight', 'norm.weight') ) head.append(('layernorm.bias', 'norm.bias') ) head.append(('classifier.weight', 'head.weight') ) head.append(('classifier.bias', 'head.bias') ) return head def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Optional[int]: lowerCamelCase__ : Tuple = 'imagenet-1k-id2label.json' lowerCamelCase__ : Union[str, Any] = 1000 lowerCamelCase__ : Optional[Any] = 'huggingface/label-files' lowerCamelCase__ : Any = num_labels lowerCamelCase__ : Dict = json.load(open(cached_download(hf_hub_url(_UpperCAmelCase , _UpperCAmelCase , repo_type='dataset' ) ) , 'r' ) ) lowerCamelCase__ : int = {int(_UpperCAmelCase ): v for k, v in idalabel.items()} lowerCamelCase__ : Tuple = idalabel lowerCamelCase__ : List[Any] = {v: k for k, v in idalabel.items()} lowerCamelCase__ : List[str] = CvtConfig(num_labels=_UpperCAmelCase , idalabel=_UpperCAmelCase , labelaid=_UpperCAmelCase ) # For depth size 13 (13 = 1+2+10) if cvt_model.rsplit('/' , 1 )[-1][4:6] == "13": lowerCamelCase__ : List[Any] = [1, 2, 10] # For depth size 21 (21 = 1+4+16) elif cvt_model.rsplit('/' , 1 )[-1][4:6] == "21": lowerCamelCase__ : Dict = [1, 4, 16] # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20) else: lowerCamelCase__ : Optional[Any] = [2, 2, 20] lowerCamelCase__ : Optional[int] = [3, 12, 16] lowerCamelCase__ : str = [192, 768, 1024] lowerCamelCase__ : Any = CvtForImageClassification(_UpperCAmelCase ) lowerCamelCase__ : Optional[Any] = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' ) lowerCamelCase__ : Tuple = image_size lowerCamelCase__ : List[str] = torch.load(_UpperCAmelCase , map_location=torch.device('cpu' ) ) lowerCamelCase__ : Optional[int] = OrderedDict() 
lowerCamelCase__ : Tuple = [] for idx in range(len(config.depth ) ): if config.cls_token[idx]: lowerCamelCase__ : Optional[Any] = list_of_state_dict + cls_token(_UpperCAmelCase ) lowerCamelCase__ : str = list_of_state_dict + embeddings(_UpperCAmelCase ) for cnt in range(config.depth[idx] ): lowerCamelCase__ : str = list_of_state_dict + attention(_UpperCAmelCase , _UpperCAmelCase ) lowerCamelCase__ : int = list_of_state_dict + final() for gg in list_of_state_dict: print(_UpperCAmelCase ) for i in range(len(_UpperCAmelCase ) ): lowerCamelCase__ : str = original_weights[list_of_state_dict[i][1]] model.load_state_dict(_UpperCAmelCase ) model.save_pretrained(_UpperCAmelCase ) image_processor.save_pretrained(_UpperCAmelCase ) # Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al if __name__ == "__main__": _UpperCAmelCase : List[str] = argparse.ArgumentParser() parser.add_argument( """--cvt_model""", default="""cvt-w24""", type=str, help="""Name of the cvt model you'd like to convert.""", ) parser.add_argument( """--image_size""", default=3_84, type=int, help="""Input Image Size""", ) parser.add_argument( """--cvt_file_name""", default=R"""cvtmodels\CvT-w24-384x384-IN-22k.pth""", type=str, help="""Input Image Size""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) _UpperCAmelCase : List[str] = parser.parse_args() convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
50
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available __magic_name__ = { "configuration_chinese_clip": [ "CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP", "ChineseCLIPConfig", "ChineseCLIPOnnxConfig", "ChineseCLIPTextConfig", "ChineseCLIPVisionConfig", ], "processing_chinese_clip": ["ChineseCLIPProcessor"], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __magic_name__ = ["ChineseCLIPFeatureExtractor"] __magic_name__ = ["ChineseCLIPImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __magic_name__ = [ "CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST", "ChineseCLIPModel", "ChineseCLIPPreTrainedModel", "ChineseCLIPTextModel", "ChineseCLIPVisionModel", ] if TYPE_CHECKING: from .configuration_chinese_clip import ( CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, ChineseCLIPConfig, ChineseCLIPOnnxConfig, ChineseCLIPTextConfig, ChineseCLIPVisionConfig, ) from .processing_chinese_clip import ChineseCLIPProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_chinese_clip import ( CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST, ChineseCLIPModel, ChineseCLIPPreTrainedModel, ChineseCLIPTextModel, ChineseCLIPVisionModel, ) else: import sys __magic_name__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
255
"""simple docstring""" import unittest from typing import Dict, List, Optional, Union import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import BridgeTowerImageProcessor class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): """simple docstring""" def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ = True , lowerCAmelCase__ = None , lowerCAmelCase__ = 3_2 , lowerCAmelCase__ = True , lowerCAmelCase__ = 1 / 2_5_5 , lowerCAmelCase__ = True , lowerCAmelCase__ = True , lowerCAmelCase__ = [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73] , lowerCAmelCase__ = [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11] , lowerCAmelCase__ = True , lowerCAmelCase__=7 , lowerCAmelCase__=3_0 , lowerCAmelCase__=4_0_0 , lowerCAmelCase__=3 , ): __SCREAMING_SNAKE_CASE = parent __SCREAMING_SNAKE_CASE = do_resize __SCREAMING_SNAKE_CASE = size if size is not None else {"""shortest_edge""": 2_8_8} __SCREAMING_SNAKE_CASE = size_divisor __SCREAMING_SNAKE_CASE = do_rescale __SCREAMING_SNAKE_CASE = rescale_factor __SCREAMING_SNAKE_CASE = do_normalize __SCREAMING_SNAKE_CASE = do_center_crop __SCREAMING_SNAKE_CASE = image_mean __SCREAMING_SNAKE_CASE = image_std __SCREAMING_SNAKE_CASE = do_pad __SCREAMING_SNAKE_CASE = batch_size __SCREAMING_SNAKE_CASE = num_channels __SCREAMING_SNAKE_CASE = min_resolution __SCREAMING_SNAKE_CASE = max_resolution def snake_case_ ( self): return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, "size_divisor": self.size_divisor, } def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__=False): if not batched: __SCREAMING_SNAKE_CASE = self.size["""shortest_edge"""] 
__SCREAMING_SNAKE_CASE = image_inputs[0] if isinstance(lowerCAmelCase__ , Image.Image): __SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = image.size else: __SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = image.shape[1], image.shape[2] __SCREAMING_SNAKE_CASE = size / min(lowerCAmelCase__ , lowerCAmelCase__) if h < w: __SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = size, scale * w else: __SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = scale * h, size __SCREAMING_SNAKE_CASE = int((1_3_3_3 / 8_0_0) * size) if max(lowerCAmelCase__ , lowerCAmelCase__) > max_size: __SCREAMING_SNAKE_CASE = max_size / max(lowerCAmelCase__ , lowerCAmelCase__) __SCREAMING_SNAKE_CASE = newh * scale __SCREAMING_SNAKE_CASE = neww * scale __SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = int(newh + 0.5), int(neww + 0.5) __SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = ( newh // self.size_divisor * self.size_divisor, neww // self.size_divisor * self.size_divisor, ) else: __SCREAMING_SNAKE_CASE = [] for image in image_inputs: __SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = self.get_expected_values([image]) expected_values.append((expected_height, expected_width)) __SCREAMING_SNAKE_CASE = max(lowerCAmelCase__ , key=lambda lowerCAmelCase__: item[0])[0] __SCREAMING_SNAKE_CASE = max(lowerCAmelCase__ , key=lambda lowerCAmelCase__: item[1])[1] return expected_height, expected_width @require_torch @require_vision class SCREAMING_SNAKE_CASE_ ( __a , unittest.TestCase ): """simple docstring""" __lowercase : Tuple = BridgeTowerImageProcessor if is_vision_available() else None def snake_case_ ( self): __SCREAMING_SNAKE_CASE = BridgeTowerImageProcessingTester(self) @property def snake_case_ ( self): return self.image_processor_tester.prepare_image_processor_dict() def snake_case_ ( self): __SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(lowerCAmelCase__ , """image_mean""")) self.assertTrue(hasattr(lowerCAmelCase__ , """image_std""")) 
self.assertTrue(hasattr(lowerCAmelCase__ , """do_normalize""")) self.assertTrue(hasattr(lowerCAmelCase__ , """do_resize""")) self.assertTrue(hasattr(lowerCAmelCase__ , """size""")) self.assertTrue(hasattr(lowerCAmelCase__ , """size_divisor""")) def snake_case_ ( self): pass def snake_case_ ( self): # Initialize image processor __SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict) # create random PIL images __SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__) for image in image_inputs: self.assertIsInstance(lowerCAmelCase__ , Image.Image) # Test not batched input __SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values __SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(lowerCAmelCase__) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __SCREAMING_SNAKE_CASE = image_processing(lowerCAmelCase__ , return_tensors="""pt""").pixel_values __SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def snake_case_ ( self): # Initialize image processor __SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors __SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , numpify=lowerCAmelCase__) for image in image_inputs: self.assertIsInstance(lowerCAmelCase__ , np.ndarray) # Test not batched input __SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values __SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = 
self.image_processor_tester.get_expected_values(lowerCAmelCase__) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __SCREAMING_SNAKE_CASE = image_processing(lowerCAmelCase__ , return_tensors="""pt""").pixel_values __SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def snake_case_ ( self): # Initialize image processor __SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors __SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , torchify=lowerCAmelCase__) for image in image_inputs: self.assertIsInstance(lowerCAmelCase__ , torch.Tensor) # Test not batched input __SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values __SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(lowerCAmelCase__) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __SCREAMING_SNAKE_CASE = image_processing(lowerCAmelCase__ , return_tensors="""pt""").pixel_values __SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , )
255
1
'''simple docstring''' import gc import unittest from diffusers import FlaxStableDiffusionInpaintPipeline from diffusers.utils import is_flax_available, load_image, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class __A ( unittest.TestCase ): def _lowercase (self : Tuple ): # clean up the VRAM after each test super().tearDown() gc.collect() def _lowercase (self : List[str] ): UpperCAmelCase_ = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-inpaint/init_image.png" ) UpperCAmelCase_ = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" ) UpperCAmelCase_ = '''xvjiarui/stable-diffusion-2-inpainting''' UpperCAmelCase_ = FlaxStableDiffusionInpaintPipeline.from_pretrained(A_ , safety_checker=A_ ) UpperCAmelCase_ = '''Face of a yellow cat, high resolution, sitting on a park bench''' UpperCAmelCase_ = jax.random.PRNGKey(0 ) UpperCAmelCase_ = 50 UpperCAmelCase_ = jax.device_count() UpperCAmelCase_ = num_samples * [prompt] UpperCAmelCase_ = num_samples * [init_image] UpperCAmelCase_ = num_samples * [mask_image] UpperCAmelCase_ = pipeline.prepare_inputs(A_ , A_ , A_ ) # shard inputs and rng UpperCAmelCase_ = replicate(A_ ) UpperCAmelCase_ = jax.random.split(A_ , jax.device_count() ) UpperCAmelCase_ = shard(A_ ) UpperCAmelCase_ = shard(A_ ) UpperCAmelCase_ = shard(A_ ) UpperCAmelCase_ = pipeline( A_ , A_ , A_ , A_ , A_ , A_ , jit=A_ ) UpperCAmelCase_ = output.images.reshape(A_ , 512 , 512 , 3 ) UpperCAmelCase_ = images[0, 253:256, 253:256, -1] UpperCAmelCase_ = jnp.asarray(jax.device_get(image_slice.flatten() ) ) UpperCAmelCase_ = jnp.array( [0.3_61_13_07, 0.37_64_97_36, 0.3_75_74_08, 0.38_21_39_53, 0.39_29_51_67, 0.3_84_16_31, 0.41_55_49_78, 0.4_13_74_75, 0.4_21_70_84] ) 
print(f"""output_slice: {output_slice}""" ) assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
1
import string def UpperCamelCase( __UpperCamelCase : str ): for key in range(len(string.ascii_uppercase ) ): lowerCAmelCase_ : List[Any] = '''''' for symbol in message: if symbol in string.ascii_uppercase: lowerCAmelCase_ : Optional[int] = string.ascii_uppercase.find(__UpperCamelCase ) lowerCAmelCase_ : Optional[int] = num - key if num < 0: lowerCAmelCase_ : int = num + len(string.ascii_uppercase ) lowerCAmelCase_ : int = translated + string.ascii_uppercase[num] else: lowerCAmelCase_ : Optional[int] = translated + symbol print(f"""Decryption using Key #{key}: {translated}""" ) def UpperCamelCase( ): lowerCAmelCase_ : Dict = input('''Encrypted message: ''' ) lowerCAmelCase_ : List[str] = message.upper() decrypt(__UpperCamelCase ) if __name__ == "__main__": import doctest doctest.testmod() main()
103
0
'''simple docstring''' snake_case_ : Union[str, Any] = 9.8_06_65 def A__ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = g ): if fluid_density <= 0: raise ValueError('Impossible fluid density' ) if volume < 0: raise ValueError('Impossible Object volume' ) if gravity <= 0: raise ValueError('Impossible Gravity' ) return fluid_density * gravity * volume if __name__ == "__main__": import doctest # run doctest doctest.testmod()
236
'''simple docstring''' from collections import OrderedDict from typing import Any, List, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import logging snake_case_ : int = logging.get_logger(__name__) snake_case_ : List[str] = { 'EleutherAI/gpt-j-6B': 'https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json', # See all GPT-J models at https://huggingface.co/models?filter=gpt_j } class lowercase__ ( lowercase ): lowercase__ = """gptj""" lowercase__ = { """max_position_embeddings""": """n_positions""", """hidden_size""": """n_embd""", """num_attention_heads""": """n_head""", """num_hidden_layers""": """n_layer""", } def __init__( self : Any ,lowerCamelCase__ : Optional[Any]=50400 ,lowerCamelCase__ : Tuple=2048 ,lowerCamelCase__ : Tuple=4096 ,lowerCamelCase__ : int=28 ,lowerCamelCase__ : Optional[Any]=16 ,lowerCamelCase__ : Optional[Any]=64 ,lowerCamelCase__ : List[Any]=None ,lowerCamelCase__ : List[Any]="gelu_new" ,lowerCamelCase__ : Optional[Any]=0.0 ,lowerCamelCase__ : List[str]=0.0 ,lowerCamelCase__ : List[Any]=0.0 ,lowerCamelCase__ : Tuple=1E-5 ,lowerCamelCase__ : int=0.0_2 ,lowerCamelCase__ : Any=True ,lowerCamelCase__ : str=50256 ,lowerCamelCase__ : Any=50256 ,lowerCamelCase__ : Tuple=False ,**lowerCamelCase__ : Optional[Any] ,): '''simple docstring''' _UpperCamelCase : Optional[Any] = vocab_size _UpperCamelCase : Optional[Any] = n_positions _UpperCamelCase : Union[str, Any] = n_embd _UpperCamelCase : Any = n_layer _UpperCamelCase : Optional[int] = n_head _UpperCamelCase : List[str] = n_inner _UpperCamelCase : List[Any] = rotary_dim _UpperCamelCase : int = activation_function _UpperCamelCase : Dict = resid_pdrop _UpperCamelCase : Any = embd_pdrop _UpperCamelCase : Union[str, Any] = attn_pdrop _UpperCamelCase : Union[str, Any] = layer_norm_epsilon _UpperCamelCase : Optional[Any] = 
initializer_range _UpperCamelCase : str = use_cache _UpperCamelCase : Union[str, Any] = bos_token_id _UpperCamelCase : Any = eos_token_id super().__init__( bos_token_id=lowerCamelCase__ ,eos_token_id=lowerCamelCase__ ,tie_word_embeddings=lowerCamelCase__ ,**lowerCamelCase__ ) class lowercase__ ( lowercase ): def __init__( self : Tuple ,lowerCamelCase__ : PretrainedConfig ,lowerCamelCase__ : str = "default" ,lowerCamelCase__ : List[PatchingSpec] = None ,lowerCamelCase__ : bool = False ,): '''simple docstring''' super().__init__(lowerCamelCase__ ,task=lowerCamelCase__ ,patching_specs=lowerCamelCase__ ,use_past=lowerCamelCase__ ) if not getattr(self._config ,'pad_token_id' ,lowerCamelCase__ ): # TODO: how to do that better? _UpperCamelCase : int = 0 @property def UpperCamelCase_ ( self : Optional[Any] ): '''simple docstring''' _UpperCamelCase : List[str] = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}} ) if self.use_past: self.fill_with_past_key_values_(lowerCamelCase__ ,direction='inputs' ) _UpperCamelCase : Tuple = {0: 'batch', 1: 'past_sequence + sequence'} else: _UpperCamelCase : Any = {0: 'batch', 1: 'sequence'} return common_inputs @property def UpperCamelCase_ ( self : Tuple ): '''simple docstring''' return self._config.n_layer @property def UpperCamelCase_ ( self : List[str] ): '''simple docstring''' return self._config.n_head def UpperCamelCase_ ( self : List[str] ,lowerCamelCase__ : PreTrainedTokenizer ,lowerCamelCase__ : int = -1 ,lowerCamelCase__ : int = -1 ,lowerCamelCase__ : bool = False ,lowerCamelCase__ : Optional[TensorType] = None ,): '''simple docstring''' _UpperCamelCase : Union[str, Any] = super(lowerCamelCase__ ,self ).generate_dummy_inputs( lowerCamelCase__ ,batch_size=lowerCamelCase__ ,seq_length=lowerCamelCase__ ,is_pair=lowerCamelCase__ ,framework=lowerCamelCase__ ) # We need to order the input in the way they appears in the forward() _UpperCamelCase : Tuple = OrderedDict({'input_ids': common_inputs['input_ids']} ) # Need to add the 
past_keys if self.use_past: if not is_torch_available(): raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' ) else: import torch _UpperCamelCase , _UpperCamelCase : str = common_inputs['input_ids'].shape # Not using the same length for past_key_values _UpperCamelCase : Optional[int] = seqlen + 2 _UpperCamelCase : List[Any] = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) _UpperCamelCase : Optional[Any] = [ (torch.zeros(lowerCamelCase__ ), torch.zeros(lowerCamelCase__ )) for _ in range(self.num_layers ) ] _UpperCamelCase : Union[str, Any] = common_inputs['attention_mask'] if self.use_past: _UpperCamelCase : Any = ordered_inputs['attention_mask'].dtype _UpperCamelCase : List[str] = torch.cat( [ordered_inputs['attention_mask'], torch.ones(lowerCamelCase__ ,lowerCamelCase__ ,dtype=lowerCamelCase__ )] ,dim=1 ) return ordered_inputs @property def UpperCamelCase_ ( self : str ): '''simple docstring''' return 13
236
1
"""Fine-tune Wav2Vec2 for CTC on a Common Voice split.

Reconstructed from an obfuscated copy whose assignment targets and the
``list_field`` helper were destroyed (duplicate parameter names = SyntaxError;
``main`` defined under another name than the ``__main__`` guard calls).
"""
import json
import logging
import os
import re
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union

import datasets
import numpy as np
import torch
import torchaudio
from packaging import version
from torch import nn

import transformers
from transformers import (
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForCTC,
    Wav2Vec2Processor,
    is_apex_available,
    set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process


if is_apex_available():
    from apex import amp

if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
    _is_native_amp_available = True
    from torch.cuda.amp import autocast

logger = logging.getLogger(__name__)


def list_field(default=None, metadata=None):
    """Dataclass field with a list default (mutable defaults need a factory)."""
    # BUG FIX: the obfuscated version declared the same parameter name twice
    # (SyntaxError) and closed over an undefined name in the lambda.
    return field(default_factory=lambda: default, metadata=metadata)


@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we fine-tune from."""

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    attention_dropout: Optional[float] = field(
        default=0.1, metadata={"help": "The dropout ratio for the attention probabilities."}
    )
    activation_dropout: Optional[float] = field(
        default=0.1, metadata={"help": "The dropout ratio for activations inside the fully connected layer."}
    )
    hidden_dropout: Optional[float] = field(
        default=0.1,
        metadata={
            "help": "The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler."
        },
    )
    feat_proj_dropout: Optional[float] = field(
        default=0.1,
        metadata={"help": "The dropout probabilitiy for all 1D convolutional layers in feature extractor."},
    )
    mask_time_prob: Optional[float] = field(
        default=0.05,
        metadata={
            "help": (
                "Propability of each feature vector along the time axis to be chosen as the start of the vector"
                "span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature"
                "vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``."
            )
        },
    )
    layerdrop: Optional[float] = field(default=0.0, metadata={"help": "The LayerDrop probability."})


@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we train and evaluate on."""

    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_split_name: Optional[str] = field(
        default="train+validation",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_val_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of validation examples to this "
                "value if set."
            )
        },
    )
    chars_to_ignore: List[str] = list_field(
        default=[",", "?", ".", "!", "-", ";", ":", "\"\"", "%", "'", "\"", "�"],
        metadata={"help": "A list of characters to remove from the transcripts."},
    )


@dataclass
class DataCollatorCTCWithPadding:
    """Dynamically pads audio inputs and label ids to the longest item in the batch."""

    processor: Wav2Vec2Processor
    padding: Union[bool, str] = True
    max_length: Optional[int] = None
    max_length_labels: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    pad_to_multiple_of_labels: Optional[int] = None

    def __call__(self, features: List[Dict[str, Any]]) -> Dict[str, torch.Tensor]:
        # split inputs and labels since they have to be of different lenghts and need
        # different padding methods
        input_features = [{"input_values": feature["input_values"]} for feature in features]
        label_features = [{"input_ids": feature["labels"]} for feature in features]

        batch = self.processor.pad(
            input_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        labels_batch = self.processor.pad(
            labels=label_features,
            padding=self.padding,
            max_length=self.max_length_labels,
            pad_to_multiple_of=self.pad_to_multiple_of_labels,
            return_tensors="pt",
        )

        # replace padding with -100 to ignore loss correctly
        labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100)
        batch["labels"] = labels
        return batch


class CTCTrainer(Trainer):
    def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
        """One training step with amp/apex/deepspeed-aware backward and CTC loss reduction."""
        model.train()
        inputs = self._prepare_inputs(inputs)

        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)

        if self.args.n_gpu > 1:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                # normalize the summed loss by the number of real (non-padding) labels
                loss = loss.sum() / (inputs["labels"] >= 0).sum()
            else:
                raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")

        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps

        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()

        return loss.detach()


def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets:
    train_dataset = datasets.load_dataset(
        "common_voice", data_args.dataset_config_name, split=data_args.train_split_name
    )
    eval_dataset = datasets.load_dataset("common_voice", data_args.dataset_config_name, split="test")

    # Create and save tokenizer
    chars_to_ignore_regex = f'[{"".join(data_args.chars_to_ignore)}]'

    def remove_special_characters(batch):
        batch["text"] = re.sub(chars_to_ignore_regex, "", batch["sentence"]).lower() + " "
        return batch

    train_dataset = train_dataset.map(remove_special_characters, remove_columns=["sentence"])
    eval_dataset = eval_dataset.map(remove_special_characters, remove_columns=["sentence"])

    def extract_all_chars(batch):
        all_text = " ".join(batch["text"])
        vocab = list(set(all_text))
        return {"vocab": [vocab], "all_text": [all_text]}

    vocab_train = train_dataset.map(
        extract_all_chars,
        batched=True,
        batch_size=-1,
        keep_in_memory=True,
        remove_columns=train_dataset.column_names,
    )
    # BUG FIX: the test vocabulary must come from eval_dataset; the original
    # mapped train_dataset while removing eval_dataset's columns.
    vocab_test = eval_dataset.map(
        extract_all_chars,
        batched=True,
        batch_size=-1,
        keep_in_memory=True,
        remove_columns=eval_dataset.column_names,
    )

    vocab_list = list(set(vocab_train["vocab"][0]) | set(vocab_test["vocab"][0]))
    vocab_dict = {v: k for k, v in enumerate(vocab_list)}
    # CTC convention: use "|" as the word delimiter instead of a literal space.
    vocab_dict["|"] = vocab_dict[" "]
    del vocab_dict[" "]
    vocab_dict["[UNK]"] = len(vocab_dict)
    vocab_dict["[PAD]"] = len(vocab_dict)

    with open("vocab.json", "w") as vocab_file:
        json.dump(vocab_dict, vocab_file)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = Wav2Vec2CTCTokenizer(
        "vocab.json",
        unk_token="[UNK]",
        pad_token="[PAD]",
        word_delimiter_token="|",
    )
    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1, sampling_rate=16_000, padding_value=0.0, do_normalize=True, return_attention_mask=True
    )
    processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
    model = Wav2Vec2ForCTC.from_pretrained(
        model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        activation_dropout=model_args.activation_dropout,
        attention_dropout=model_args.attention_dropout,
        hidden_dropout=model_args.hidden_dropout,
        feat_proj_dropout=model_args.feat_proj_dropout,
        mask_time_prob=model_args.mask_time_prob,
        gradient_checkpointing=training_args.gradient_checkpointing,
        layerdrop=model_args.layerdrop,
        ctc_loss_reduction="mean",
        pad_token_id=processor.tokenizer.pad_token_id,
        vocab_size=len(processor.tokenizer),
    )

    if data_args.max_train_samples is not None:
        max_train_samples = min(len(train_dataset), data_args.max_train_samples)
        train_dataset = train_dataset.select(range(max_train_samples))

    if data_args.max_val_samples is not None:
        eval_dataset = eval_dataset.select(range(data_args.max_val_samples))

    resampler = torchaudio.transforms.Resample(48_000, 16_000)

    # Preprocessing the datasets.
    # We need to read the aduio files as arrays and tokenize the targets.
    def speech_file_to_array_fn(batch):
        speech_array, sampling_rate = torchaudio.load(batch["path"])
        batch["speech"] = resampler(speech_array).squeeze().numpy()
        batch["sampling_rate"] = 16_000
        batch["target_text"] = batch["text"]
        return batch

    train_dataset = train_dataset.map(
        speech_file_to_array_fn,
        remove_columns=train_dataset.column_names,
        num_proc=data_args.preprocessing_num_workers,
    )
    eval_dataset = eval_dataset.map(
        speech_file_to_array_fn,
        remove_columns=eval_dataset.column_names,
        num_proc=data_args.preprocessing_num_workers,
    )

    def prepare_dataset(batch):
        # check that all files have the correct sampling rate
        assert (
            len(set(batch["sampling_rate"])) == 1
        ), f"Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}."
        processed = processor(
            audio=batch["speech"], text=batch["target_text"], sampling_rate=batch["sampling_rate"][0]
        )
        batch.update(processed)
        return batch

    train_dataset = train_dataset.map(
        prepare_dataset,
        remove_columns=train_dataset.column_names,
        batch_size=training_args.per_device_train_batch_size,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
    )
    eval_dataset = eval_dataset.map(
        prepare_dataset,
        remove_columns=eval_dataset.column_names,
        batch_size=training_args.per_device_train_batch_size,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
    )

    # Metric
    wer_metric = datasets.load_metric("wer")

    def compute_metrics(pred):
        pred_logits = pred.predictions
        pred_ids = np.argmax(pred_logits, axis=-1)

        # restore pad ids before decoding: -100 marked ignored label positions
        pred.label_ids[pred.label_ids == -100] = processor.tokenizer.pad_token_id

        pred_str = processor.batch_decode(pred_ids)
        # we do not want to group tokens when computing the metrics
        label_str = processor.batch_decode(pred.label_ids, group_tokens=False)

        wer = wer_metric.compute(predictions=pred_str, references=label_str)
        return {"wer": wer}

    if model_args.freeze_feature_extractor:
        model.freeze_feature_extractor()

    # Data collator
    data_collator = DataCollatorCTCWithPadding(processor=processor, padding=True)

    # Initialize our Trainer
    trainer = CTCTrainer(
        model=model,
        data_collator=data_collator,
        args=training_args,
        compute_metrics=compute_metrics,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        tokenizer=processor.feature_extractor,
    )

    # Training
    if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif os.path.isdir(model_args.model_name_or_path):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None

        # Save the feature_extractor and the tokenizer
        if is_main_process(training_args.local_rank):
            processor.save_pretrained(training_args.output_dir)

        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()

        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate()
        max_val_samples = data_args.max_val_samples if data_args.max_val_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_val_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    return results


if __name__ == "__main__":
    main()
62
"""Tests for the Nezha model family.

Reconstructed from an obfuscated copy: annotated tuple-unpack targets such as
``( (x), (x), ) : List[Any] = ...`` are SyntaxErrors, and every tester method
collided on one obfuscated name even though the test class calls them by their
real names (``create_and_check_model`` etc.).  Names restored accordingly.
"""
import os
import tempfile
import unittest

from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        MODEL_FOR_PRETRAINING_MAPPING,
        NezhaForMaskedLM,
        NezhaForMultipleChoice,
        NezhaForNextSentencePrediction,
        NezhaForPreTraining,
        NezhaForQuestionAnswering,
        NezhaForSequenceClassification,
        NezhaForTokenClassification,
        NezhaModel,
    )
    from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST


class NezhaModelTester:
    """Builds tiny Nezha configs and inputs and checks output shapes per head."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=128,
        max_relative_position=32,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return NezhaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = NezhaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NezhaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NezhaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = NezhaForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class NezhaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            NezhaModel,
            NezhaForMaskedLM,
            NezhaForMultipleChoice,
            NezhaForNextSentencePrediction,
            NezhaForPreTraining,
            NezhaForQuestionAnswering,
            NezhaForSequenceClassification,
            NezhaForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": NezhaModel,
            "fill-mask": NezhaForMaskedLM,
            "question-answering": NezhaForQuestionAnswering,
            "text-classification": NezhaForSequenceClassification,
            "token-classification": NezhaForTokenClassification,
            "zero-shot": NezhaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = NezhaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NezhaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_model_as_decoder_with_default_input_mask(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()

        input_mask = None

        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NezhaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # NezhaForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == NezhaForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "bert.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "bert.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))


@require_torch
class NezhaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_nezha_model(self):
        model = NezhaModel.from_pretrained("sijunhe/nezha-cn-base")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1]])

        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor([[[0.0685, 0.2441, 0.1102], [0.0600, 0.1906, 0.1349], [0.0221, 0.0819, 0.0586]]])
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))

    @slow
    def test_inference_nezha_masked_lm(self):
        model = NezhaForMaskedLM.from_pretrained("sijunhe/nezha-cn-base")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1]])

        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 21128))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-2.7939, -1.7902, -2.2189], [-2.8585, -1.8908, -2.3723], [-2.6499, -1.7750, -2.2558]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
212
0
from collections import defaultdict from typing import Optional from ..image_utils import load_image from ..utils import ( add_end_docstrings, is_torch_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, ChunkPipeline if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) @add_end_docstrings(__lowerCAmelCase ) class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase ): def __init__( self : List[str] , **lowerCamelCase_ : Union[str, Any] ): """simple docstring""" super().__init__(**lowerCamelCase_ ) requires_backends(self , """vision""" ) requires_backends(self , """torch""" ) if self.framework != "pt": raise ValueError(f"""The {self.__class__} is only available in PyTorch.""" ) self.check_model_type(lowerCamelCase_ ) def lowerCamelCase_ ( self : Optional[Any] , **lowerCamelCase_ : List[str] ): """simple docstring""" UpperCamelCase = {} UpperCamelCase = {} UpperCamelCase = {} # preprocess args if "points_per_batch" in kwargs: UpperCamelCase = kwargs["""points_per_batch"""] if "points_per_crop" in kwargs: UpperCamelCase = kwargs["""points_per_crop"""] if "crops_n_layers" in kwargs: UpperCamelCase = kwargs["""crops_n_layers"""] if "crop_overlap_ratio" in kwargs: UpperCamelCase = kwargs["""crop_overlap_ratio"""] if "crop_n_points_downscale_factor" in kwargs: UpperCamelCase = kwargs["""crop_n_points_downscale_factor"""] # postprocess args if "pred_iou_thresh" in kwargs: UpperCamelCase = kwargs["""pred_iou_thresh"""] if "stability_score_offset" in kwargs: UpperCamelCase = kwargs["""stability_score_offset"""] if "mask_threshold" in kwargs: UpperCamelCase = kwargs["""mask_threshold"""] if "stability_score_thresh" in kwargs: UpperCamelCase = kwargs["""stability_score_thresh"""] if "crops_nms_thresh" in kwargs: UpperCamelCase = kwargs["""crops_nms_thresh"""] if "output_rle_mask" in kwargs: UpperCamelCase = kwargs["""output_rle_mask"""] if 
"output_bboxes_mask" in kwargs: UpperCamelCase = kwargs["""output_bboxes_mask"""] return preprocess_kwargs, forward_params, postprocess_kwargs def __call__( self : Optional[int] , lowerCamelCase_ : Any , *lowerCamelCase_ : List[Any] , lowerCamelCase_ : Optional[Any]=None , lowerCamelCase_ : Tuple=None , **lowerCamelCase_ : Dict ): """simple docstring""" return super().__call__(lowerCamelCase_ , *lowerCamelCase_ , num_workers=lowerCamelCase_ , batch_size=lowerCamelCase_ , **lowerCamelCase_ ) def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : Any , lowerCamelCase_ : List[str]=64 , lowerCamelCase_ : int = 0 , lowerCamelCase_ : float = 512 / 1500 , lowerCamelCase_ : Optional[int] = 32 , lowerCamelCase_ : Optional[int] = 1 , ): """simple docstring""" UpperCamelCase = load_image(lowerCamelCase_ ) UpperCamelCase = self.image_processor.size["""longest_edge"""] UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = self.image_processor.generate_crop_boxes( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) UpperCamelCase = self.image_processor(images=lowerCamelCase_ , return_tensors="""pt""" ) with self.device_placement(): if self.framework == "pt": UpperCamelCase = self.get_inference_context() with inference_context(): UpperCamelCase = self._ensure_tensor_on_device(lowerCamelCase_ , device=self.device ) UpperCamelCase = self.model.get_image_embeddings(model_inputs.pop("""pixel_values""" ) ) UpperCamelCase = image_embeddings UpperCamelCase = grid_points.shape[1] UpperCamelCase = points_per_batch if points_per_batch is not None else n_points if points_per_batch <= 0: raise ValueError( """Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. 
""" """To return all points at once, set points_per_batch to None""" ) for i in range(0 , lowerCamelCase_ , lowerCamelCase_ ): UpperCamelCase = grid_points[:, i : i + points_per_batch, :, :] UpperCamelCase = input_labels[:, i : i + points_per_batch] UpperCamelCase = i == n_points - points_per_batch yield { "input_points": batched_points, "input_labels": labels, "input_boxes": crop_boxes, "is_last": is_last, **model_inputs, } def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : List[str] , lowerCamelCase_ : List[str]=0.8_8 , lowerCamelCase_ : Optional[int]=0.9_5 , lowerCamelCase_ : int=0 , lowerCamelCase_ : List[Any]=1 , ): """simple docstring""" UpperCamelCase = model_inputs.pop("""input_boxes""" ) UpperCamelCase = model_inputs.pop("""is_last""" ) UpperCamelCase = model_inputs.pop("""original_sizes""" ).tolist() UpperCamelCase = model_inputs.pop("""reshaped_input_sizes""" ).tolist() UpperCamelCase = self.model(**lowerCamelCase_ ) # post processing happens here in order to avoid CPU GPU copies of ALL the masks UpperCamelCase = model_outputs["""pred_masks"""] UpperCamelCase = self.image_processor.post_process_masks( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , binarize=lowerCamelCase_ ) UpperCamelCase = model_outputs["""iou_scores"""] UpperCamelCase , UpperCamelCase , UpperCamelCase = self.image_processor.filter_masks( masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , ) return { "masks": masks, "is_last": is_last, "boxes": boxes, "iou_scores": iou_scores, } def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Tuple=False , lowerCamelCase_ : Optional[Any]=False , lowerCamelCase_ : str=0.7 , ): """simple docstring""" UpperCamelCase = [] UpperCamelCase = [] UpperCamelCase = [] for model_output in model_outputs: all_scores.append(model_output.pop("""iou_scores""" ) ) all_masks.extend(model_output.pop("""masks""" ) ) 
all_boxes.append(model_output.pop("""boxes""" ) ) UpperCamelCase = torch.cat(lowerCamelCase_ ) UpperCamelCase = torch.cat(lowerCamelCase_ ) UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = self.image_processor.post_process_for_mask_generation( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) UpperCamelCase = defaultdict(lowerCamelCase_ ) for output in model_outputs: for k, v in output.items(): extra[k].append(lowerCamelCase_ ) UpperCamelCase = {} if output_rle_mask: UpperCamelCase = rle_mask if output_bboxes_mask: UpperCamelCase = bounding_boxes return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
165
import argparse import re from flax.traverse_util import flatten_dict, unflatten_dict from tax import checkpoints from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model from transformers.utils import logging logging.set_verbosity_info() # should not include what is already done by the `from_pt` argument _SCREAMING_SNAKE_CASE = { """/attention/""": """/0/SelfAttention/""", """/self_attention/""": """/0/SelfAttention/""", """/encoder_decoder_attention/""": """/1/EncDecAttention/""", """value""": """v""", """query""": """q""", """key""": """k""", """out""": """o""", """pre_self_attention_layer_norm""": """0/layer_norm""", """pre_cross_attention_layer_norm""": """1/layer_norm""", """pre_attention_layer_norm""": """0/layer_norm""", # previously 1, but seems wrong """token_embedder""": """shared""", """encoder_norm""": """final_layer_norm""", """decoder_norm""": """final_layer_norm""", """relpos_bias/rel_embedding""": """block/0/layer/0/SelfAttention/relative_attention_bias/weight""", """router/router_weights/w/""": """router/classifier/""", """roer/roer_weights/w/""": """router/classifier/""", """logits_dense""": """lm_head""", } def lowercase( UpperCamelCase_ ) -> str: '''simple docstring''' # 1. in HF T5, we have block.{x}.layer.{y}. 
which corresponds to layer.{x} in # the original model UpperCamelCase = list(s_dict.keys() ) for key in keys: UpperCamelCase = R""".*/layers_(\d+)""" UpperCamelCase = key if re.match(UpperCamelCase_ , UpperCamelCase_ ): UpperCamelCase = re.sub(R"""layers_(\d+)""" , R"""block/\1/layer""" , UpperCamelCase_ ) UpperCamelCase = R"""(encoder|decoder)\/""" if re.match(UpperCamelCase_ , UpperCamelCase_ ): UpperCamelCase = re.match(UpperCamelCase_ , UpperCamelCase_ ).groups() if groups[0] == "encoder": UpperCamelCase = re.sub(R"""/mlp/""" , R"""/1/mlp/""" , UpperCamelCase_ ) UpperCamelCase = re.sub(R"""/pre_mlp_layer_norm/""" , R"""/1/layer_norm/""" , UpperCamelCase_ ) elif groups[0] == "decoder": UpperCamelCase = re.sub(R"""/mlp/""" , R"""/2/mlp/""" , UpperCamelCase_ ) UpperCamelCase = re.sub(R"""/pre_mlp_layer_norm/""" , R"""/2/layer_norm/""" , UpperCamelCase_ ) # 2. Convert other classic mappings for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items(): if old_key in new_key: UpperCamelCase = new_key.replace(UpperCamelCase_ , UpperCamelCase_ ) print(f"""{key} -> {new_key}""" ) UpperCamelCase = s_dict.pop(UpperCamelCase_ ) if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict: UpperCamelCase = s_dict[ """encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight""" ].T if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict: UpperCamelCase = s_dict[ """decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight""" ].T # 3. 
Take extra care of the EXPERTS layer for key in list(s_dict.keys() ): if "expert" in key: UpperCamelCase = s_dict[key].shape[0] UpperCamelCase = s_dict[key] for idx in range(UpperCamelCase_ ): UpperCamelCase = expert_weihts[idx] print(f"""{key} -> {key.replace("expert/" , "nested fstring" )}""" ) s_dict.pop(UpperCamelCase_ ) return s_dict _SCREAMING_SNAKE_CASE = { """NUM_ENCODER_LAYERS""": """num_layers""", """NUM_DECODER_LAYERS""": """num_decoder_layers""", """NUM_HEADS""": """num_heads""", """HEAD_DIM""": """d_kv""", """EMBED_DIM""": """d_model""", """MLP_DIM""": """d_ff""", """NUM_SELECTED_EXPERTS""": """num_selected_experts""", """NUM_ENCODER_SPARSE_LAYERS""": """num_sparse_encoder_layers""", """NUM_DECODER_SPARSE_LAYERS""": """num_sparse_decoder_layers""", """dense.MlpBlock.activations""": """feed_forward_proj""", } def lowercase( UpperCamelCase_ , UpperCamelCase_ ) -> Any: '''simple docstring''' # Convert a google style config to the hugging face fromat import regex as re with open(UpperCamelCase_ , """r""" ) as f: UpperCamelCase = f.read() UpperCamelCase = re.findall(R"""(.*) = ([0-9.]*)""" , UpperCamelCase_ ) UpperCamelCase = {} for param, value in regex_match: if param in GIN_TO_CONFIG_MAPPING and value != "": UpperCamelCase = float(UpperCamelCase_ ) if """.""" in value else int(UpperCamelCase_ ) UpperCamelCase = re.findall(R"""(.*activations) = \(\'(.*)\',\)""" , UpperCamelCase_ )[0] UpperCamelCase = str(activation[1] ) UpperCamelCase = num_experts UpperCamelCase = SwitchTransformersConfig(**UpperCamelCase_ ) return config def lowercase( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=None , UpperCamelCase_="./" , UpperCamelCase_=8 ) -> Optional[int]: '''simple docstring''' # Initialise PyTorch model print(f"""Loading flax weights from : {flax_checkpoint_path}""" ) UpperCamelCase = checkpoints.load_tax_checkpoint(UpperCamelCase_ ) if gin_file is not None: UpperCamelCase = convert_gin_to_config(UpperCamelCase_ , UpperCamelCase_ ) else: UpperCamelCase = 
SwitchTransformersConfig.from_pretrained(UpperCamelCase_ ) UpperCamelCase = SwitchTransformersForConditionalGeneration(UpperCamelCase_ ) UpperCamelCase = flax_params["""target"""] UpperCamelCase = flatten_dict(UpperCamelCase_ , sep="""/""" ) UpperCamelCase = rename_keys(UpperCamelCase_ ) UpperCamelCase = unflatten_dict(UpperCamelCase_ , sep="""/""" ) # Load the flax params in the PT model load_flax_weights_in_pytorch_model(UpperCamelCase_ , UpperCamelCase_ ) print(f"""Save PyTorch model to {pytorch_dump_path}""" ) pt_model.save_pretrained(UpperCamelCase_ ) if __name__ == "__main__": _SCREAMING_SNAKE_CASE = argparse.ArgumentParser() # Required parameters parser.add_argument( """--switch_t5x_checkpoint_path""", default=None, type=str, required=True, help=( """The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the""" """ model architecture. If not provided, a `gin_file` has to be provided.""" ), ) parser.add_argument( """--gin_file""", default=None, type=str, required=False, help="""Path to the gin config file. If not provided, a `config_file` has to be passed """, ) parser.add_argument( """--config_name""", default=None, type=str, required=False, help="""Config name of SwitchTransformers model.""" ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output pytorch model.""" ) parser.add_argument("""--num_experts""", default=8, type=int, required=False, help="""Number of experts""") _SCREAMING_SNAKE_CASE = parser.parse_args() convert_flax_checkpoint_to_pytorch( args.switch_tax_checkpoint_path, args.config_name, args.gin_file, args.pytorch_dump_folder_path, args.num_experts, )
165
1
"""simple docstring""" import os from pathlib import Path def lowerCamelCase__ ( _lowerCamelCase : List[Any] , _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : List[str] ) -> Optional[Any]: lowerCamelCase_ = { 'en': 'Machine learning is great, isn\'t it?', 'ru': 'Машинное обучение - это здорово, не так ли?', 'de': 'Maschinelles Lernen ist großartig, nicht wahr?', } # BLUE scores as follows: # "pair": [fairseq, transformers] lowerCamelCase_ = { 'wmt16-en-de-dist-12-1': [28.3, 27.52], 'wmt16-en-de-dist-6-1': [27.4, 27.11], 'wmt16-en-de-12-1': [26.9, 25.75], } lowerCamelCase_ = F'''{src_lang}-{tgt_lang}''' lowerCamelCase_ = F''' --- language: - {src_lang} - {tgt_lang} thumbnail: tags: - translation - wmt16 - allenai license: apache-2.0 datasets: - wmt16 metrics: - bleu --- # FSMT ## Model description This is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}. For more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369). All 3 models are available: * [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1) * [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1) * [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1) ## Intended uses & limitations #### How to use ```python from transformers import FSMTForConditionalGeneration, FSMTTokenizer mname = \"allenai/{model_name}\" tokenizer = FSMTTokenizer.from_pretrained(mname) model = FSMTForConditionalGeneration.from_pretrained(mname) input = \"{texts[src_lang]}\" input_ids = tokenizer.encode(input, return_tensors=\"pt\") outputs = model.generate(input_ids) decoded = tokenizer.decode(outputs[0], skip_special_tokens=True) print(decoded) # {texts[tgt_lang]} ``` #### Limitations and bias ## Training data Pretrained weights were left identical to the original model released by allenai. 
For more details, please, see the [paper](https://arxiv.org/abs/2006.10369). ## Eval results Here are the BLEU scores: model | fairseq | transformers -------|---------|---------- {model_name} | {scores[model_name][0]} | {scores[model_name][1]} The score is slightly below the score reported in the paper, as the researchers don\'t use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs. The score was calculated using this code: ```bash git clone https://github.com/huggingface/transformers cd transformers export PAIR={pair} export DATA_DIR=data/$PAIR export SAVE_DIR=data/$PAIR export BS=8 export NUM_BEAMS=5 mkdir -p $DATA_DIR sacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source sacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target echo $PAIR PYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS ``` ## Data Sources - [training, etc.](http://www.statmt.org/wmt16/) - [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372) ### BibTeX entry and citation info ``` @misc{{kasai2020deep, title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}}, author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. 
Smith}}, year={{2020}}, eprint={{2006.10369}}, archivePrefix={{arXiv}}, primaryClass={{cs.CL}} }} ``` ''' model_card_dir.mkdir(parents=UpperCAmelCase_ , exist_ok=UpperCAmelCase_ ) lowerCamelCase_ = os.path.join(UpperCAmelCase_ , 'README.md' ) print(F'''Generating {path}''' ) with open(UpperCAmelCase_ , 'w' , encoding='utf-8' ) as f: f.write(UpperCAmelCase_ ) # make sure we are under the root of the project _SCREAMING_SNAKE_CASE : List[Any] = Path(__file__).resolve().parent.parent.parent _SCREAMING_SNAKE_CASE : int = repo_dir / 'model_cards' for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]: _SCREAMING_SNAKE_CASE : Optional[int] = model_cards_dir / 'allenai' / model_name write_model_card(model_card_dir, src_lang='''en''', tgt_lang='''de''', model_name=model_name)
183
"""simple docstring""" def _snake_case ( UpperCAmelCase_ : int = 10 ): if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) or n < 0: raise ValueError("""Invalid input""" ) A__ = 10**n A__ = 2_8433 * (pow(2 , 783_0457 , UpperCAmelCase_ )) + 1 return str(number % modulus ) if __name__ == "__main__": from doctest import testmod testmod() print(f"""{solution(1_0) = }""")
335
0
import unittest import numpy as np from transformers.testing_utils import is_flaky, require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DonutImageProcessor class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): def __init__( self,__lowerCamelCase,__lowerCamelCase=7,__lowerCamelCase=3,__lowerCamelCase=18,__lowerCamelCase=30,__lowerCamelCase=400,__lowerCamelCase=True,__lowerCamelCase=None,__lowerCamelCase=True,__lowerCamelCase=False,__lowerCamelCase=True,__lowerCamelCase=True,__lowerCamelCase=[0.5, 0.5, 0.5],__lowerCamelCase=[0.5, 0.5, 0.5],): A__ = parent A__ = batch_size A__ = num_channels A__ = image_size A__ = min_resolution A__ = max_resolution A__ = do_resize A__ = size if size is not None else {'''height''': 18, '''width''': 20} A__ = do_thumbnail A__ = do_align_axis A__ = do_pad A__ = do_normalize A__ = image_mean A__ = image_std def UpperCamelCase ( self ): return { "do_resize": self.do_resize, "size": self.size, "do_thumbnail": self.do_thumbnail, "do_align_long_axis": self.do_align_axis, "do_pad": self.do_pad, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, } @require_torch @require_vision class SCREAMING_SNAKE_CASE__ ( UpperCamelCase__ , unittest.TestCase ): __SCREAMING_SNAKE_CASE = DonutImageProcessor if is_vision_available() else None def UpperCamelCase ( self ): A__ = DonutImageProcessingTester(self ) @property def UpperCamelCase ( self ): return self.image_processor_tester.prepare_image_processor_dict() def UpperCamelCase ( self ): A__ = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__lowerCamelCase,'''do_resize''' ) ) self.assertTrue(hasattr(__lowerCamelCase,'''size''' ) ) 
self.assertTrue(hasattr(__lowerCamelCase,'''do_thumbnail''' ) ) self.assertTrue(hasattr(__lowerCamelCase,'''do_align_long_axis''' ) ) self.assertTrue(hasattr(__lowerCamelCase,'''do_pad''' ) ) self.assertTrue(hasattr(__lowerCamelCase,'''do_normalize''' ) ) self.assertTrue(hasattr(__lowerCamelCase,'''image_mean''' ) ) self.assertTrue(hasattr(__lowerCamelCase,'''image_std''' ) ) def UpperCamelCase ( self ): A__ = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size,{'''height''': 18, '''width''': 20} ) A__ = self.image_processing_class.from_dict(self.image_processor_dict,size=42 ) self.assertEqual(image_processor.size,{'''height''': 42, '''width''': 42} ) # Previous config had dimensions in (width, height) order A__ = self.image_processing_class.from_dict(self.image_processor_dict,size=(42, 84) ) self.assertEqual(image_processor.size,{'''height''': 84, '''width''': 42} ) def UpperCamelCase ( self ): pass @is_flaky() def UpperCamelCase ( self ): # Initialize image_processing A__ = self.image_processing_class(**self.image_processor_dict ) # create random PIL images A__ = prepare_image_inputs(self.image_processor_tester,equal_resolution=__lowerCamelCase ) for image in image_inputs: self.assertIsInstance(__lowerCamelCase,Image.Image ) # Test not batched input A__ = image_processing(image_inputs[0],return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape,( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ),) # Test batched A__ = image_processing(__lowerCamelCase,return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ),) @is_flaky() def UpperCamelCase ( self ): # Initialize image_processing A__ = 
self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors A__ = prepare_image_inputs(self.image_processor_tester,equal_resolution=__lowerCamelCase,numpify=__lowerCamelCase ) for image in image_inputs: self.assertIsInstance(__lowerCamelCase,np.ndarray ) # Test not batched input A__ = image_processing(image_inputs[0],return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape,( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ),) # Test batched A__ = image_processing(__lowerCamelCase,return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ),) @is_flaky() def UpperCamelCase ( self ): # Initialize image_processing A__ = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors A__ = prepare_image_inputs(self.image_processor_tester,equal_resolution=__lowerCamelCase,torchify=__lowerCamelCase ) for image in image_inputs: self.assertIsInstance(__lowerCamelCase,torch.Tensor ) # Test not batched input A__ = image_processing(image_inputs[0],return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape,( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ),) # Test batched A__ = image_processing(__lowerCamelCase,return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ),)
39
import gc import unittest import numpy as np import torch from diffusers import StableDiffusionKDiffusionPipeline from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu enable_full_determinism() @slow @require_torch_gpu class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): def UpperCamelCase ( self ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCamelCase ( self ): A__ = StableDiffusionKDiffusionPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''' ) A__ = sd_pipe.to(__lowerCamelCase ) sd_pipe.set_progress_bar_config(disable=__lowerCamelCase ) sd_pipe.set_scheduler('''sample_euler''' ) A__ = '''A painting of a squirrel eating a burger''' A__ = torch.manual_seed(0 ) A__ = sd_pipe([prompt],generator=__lowerCamelCase,guidance_scale=9.0,num_inference_steps=20,output_type='''np''' ) A__ = output.images A__ = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) A__ = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def UpperCamelCase ( self ): A__ = StableDiffusionKDiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' ) A__ = sd_pipe.to(__lowerCamelCase ) sd_pipe.set_progress_bar_config(disable=__lowerCamelCase ) sd_pipe.set_scheduler('''sample_euler''' ) A__ = '''A painting of a squirrel eating a burger''' A__ = torch.manual_seed(0 ) A__ = sd_pipe([prompt],generator=__lowerCamelCase,guidance_scale=9.0,num_inference_steps=20,output_type='''np''' ) A__ = output.images A__ = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) A__ = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-1 def UpperCamelCase ( self ): A__ = 
StableDiffusionKDiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' ) A__ = sd_pipe.to(__lowerCamelCase ) sd_pipe.set_progress_bar_config(disable=__lowerCamelCase ) sd_pipe.set_scheduler('''sample_dpmpp_2m''' ) A__ = '''A painting of a squirrel eating a burger''' A__ = torch.manual_seed(0 ) A__ = sd_pipe( [prompt],generator=__lowerCamelCase,guidance_scale=7.5,num_inference_steps=15,output_type='''np''',use_karras_sigmas=__lowerCamelCase,) A__ = output.images A__ = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) A__ = np.array( [0.11381689, 0.12112921, 0.1389457, 0.12549606, 0.1244964, 0.10831517, 0.11562866, 0.10867816, 0.10499048] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
39
1
"""simple docstring""" import os from pickle import UnpicklingError from typing import Dict, Tuple import jax import jax.numpy as jnp import numpy as np from flax.serialization import from_bytes from flax.traverse_util import flatten_dict, unflatten_dict import transformers from .utils import logging snake_case__ : Optional[Any] = logging.get_logger(__name__) def _snake_case ( _snake_case : int , _snake_case : Union[str, Any] , _snake_case : List[str] , _snake_case : Optional[Any]=False ): try: import torch # noqa: F401 except ImportError: logger.error( '''Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see''' ''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation''' ''' instructions.''' ) raise if not is_sharded: lowerCAmelCase : List[str] = os.path.abspath(_snake_case ) logger.info(f'''Loading PyTorch weights from {pt_path}''' ) lowerCAmelCase : Dict = torch.load(_snake_case , map_location='''cpu''' ) logger.info(f'''PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.''' ) lowerCAmelCase : Union[str, Any] = convert_pytorch_state_dict_to_flax(_snake_case , _snake_case ) else: # model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files lowerCAmelCase : Optional[Any] = convert_pytorch_sharded_state_dict_to_flax(_snake_case , _snake_case ) return flax_state_dict def _snake_case ( _snake_case : Tuple[str] , _snake_case : np.ndarray , _snake_case : Dict[str, jnp.ndarray] , _snake_case : str , ): def is_key_or_prefix_key_in_dict(_snake_case : Tuple[str] ) -> bool: return len(set(_snake_case ) & {key, (model_prefix,) + key} ) > 0 # layer norm lowerCAmelCase : str = pt_tuple_key[:-1] + ('''scale''',) if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(_snake_case ): return renamed_pt_tuple_key, pt_tensor # batch norm layer mean lowerCAmelCase : int = pt_tuple_key[:-1] + ('''mean''',) if 
pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(_snake_case ): return renamed_pt_tuple_key, pt_tensor # batch norm layer var lowerCAmelCase : List[Any] = pt_tuple_key[:-1] + ('''var''',) if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(_snake_case ): return renamed_pt_tuple_key, pt_tensor # embedding lowerCAmelCase : Tuple = pt_tuple_key[:-1] + ('''embedding''',) if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(_snake_case ): return renamed_pt_tuple_key, pt_tensor # conv layer lowerCAmelCase : Union[str, Any] = pt_tuple_key[:-1] + ('''kernel''',) if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(_snake_case ): lowerCAmelCase : Union[str, Any] = pt_tensor.transpose(2 , 3 , 1 , 0 ) return renamed_pt_tuple_key, pt_tensor # linear layer lowerCAmelCase : Any = pt_tuple_key[:-1] + ('''kernel''',) if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(_snake_case ): lowerCAmelCase : Tuple = pt_tensor.T return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm weight lowerCAmelCase : str = pt_tuple_key[:-1] + ('''weight''',) if pt_tuple_key[-1] == "gamma": return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm bias lowerCAmelCase : Tuple = pt_tuple_key[:-1] + ('''bias''',) if pt_tuple_key[-1] == "beta": return renamed_pt_tuple_key, pt_tensor # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030 lowerCAmelCase : str = None if pt_tuple_key[-3::2] == ("parametrizations", "original0"): lowerCAmelCase : Any = pt_tuple_key[-2] + '''_g''' elif pt_tuple_key[-3::2] == ("parametrizations", "original1"): lowerCAmelCase : Tuple = pt_tuple_key[-2] + '''_v''' if name is not None: lowerCAmelCase : Optional[Any] = pt_tuple_key[:-3] + (name,) return renamed_pt_tuple_key, pt_tensor return pt_tuple_key, pt_tensor def _snake_case ( _snake_case : Union[str, Any] , _snake_case : Optional[Any] ): # convert pytorch tensor to numpy 
lowerCAmelCase : Any = {k: v.numpy() for k, v in pt_state_dict.items()} lowerCAmelCase : Any = flax_model.base_model_prefix # use params dict if the model contains batch norm layers if "params" in flax_model.params: lowerCAmelCase : Dict = flax_model.params['''params'''] else: lowerCAmelCase : Dict = flax_model.params lowerCAmelCase : Tuple = flatten_dict(_snake_case ) # add batch_stats keys,values to dict if "batch_stats" in flax_model.params: lowerCAmelCase : Any = flatten_dict(flax_model.params['''batch_stats'''] ) random_flax_state_dict.update(_snake_case ) lowerCAmelCase : int = {} lowerCAmelCase : Dict = (model_prefix not in flax_model_params) and ( model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()} ) lowerCAmelCase : Dict = (model_prefix in flax_model_params) and ( model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()} ) # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): lowerCAmelCase : Union[str, Any] = tuple(pt_key.split('''.''' ) ) # remove base model prefix if necessary lowerCAmelCase : int = pt_tuple_key[0] == model_prefix if load_model_with_head_into_base_model and has_base_model_prefix: lowerCAmelCase : int = pt_tuple_key[1:] # Correctly rename weight parameters lowerCAmelCase, lowerCAmelCase : Optional[Any] = rename_key_and_reshape_tensor( _snake_case , _snake_case , _snake_case , _snake_case ) # add model prefix if necessary lowerCAmelCase : List[Any] = (model_prefix,) + flax_key in random_flax_state_dict if load_base_model_into_model_with_head and require_base_model_prefix: lowerCAmelCase : Optional[Any] = (model_prefix,) + flax_key if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( f'''PyTorch checkpoint seems to be incorrect. 
Weight {pt_key} was expected to be of shape ''' f'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' ) # add batch stats if the model contains batchnorm layers if "batch_stats" in flax_model.params: if "mean" in flax_key[-1] or "var" in flax_key[-1]: lowerCAmelCase : Optional[int] = jnp.asarray(_snake_case ) continue # remove num_batches_tracked key if "num_batches_tracked" in flax_key[-1]: flax_state_dict.pop(_snake_case , _snake_case ) continue # also add unexpected weight so that warning is thrown lowerCAmelCase : List[Any] = jnp.asarray(_snake_case ) else: # also add unexpected weight so that warning is thrown lowerCAmelCase : List[Any] = jnp.asarray(_snake_case ) return unflatten_dict(_snake_case ) def _snake_case ( _snake_case : str , _snake_case : int ): import torch # Load the index lowerCAmelCase : Tuple = {} for shard_file in shard_filenames: # load using msgpack utils lowerCAmelCase : Optional[int] = torch.load(_snake_case ) lowerCAmelCase : Dict = {k: v.numpy() for k, v in pt_state_dict.items()} lowerCAmelCase : Union[str, Any] = flax_model.base_model_prefix # use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict if "batch_stats" in flax_model.params: lowerCAmelCase : Any = flax_model.params['''params'''] lowerCAmelCase : str = flatten_dict(_snake_case ) random_flax_state_dict.update(flatten_dict(flax_model.params['''batch_stats'''] ) ) else: lowerCAmelCase : Optional[Any] = flax_model.params lowerCAmelCase : Dict = flatten_dict(_snake_case ) lowerCAmelCase : Tuple = (model_prefix not in flax_model_params) and ( model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()} ) lowerCAmelCase : Tuple = (model_prefix in flax_model_params) and ( model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()} ) # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): lowerCAmelCase : List[str] = tuple(pt_key.split('''.''' ) ) 
# remove base model prefix if necessary lowerCAmelCase : Dict = pt_tuple_key[0] == model_prefix if load_model_with_head_into_base_model and has_base_model_prefix: lowerCAmelCase : str = pt_tuple_key[1:] # Correctly rename weight parameters lowerCAmelCase, lowerCAmelCase : Tuple = rename_key_and_reshape_tensor( _snake_case , _snake_case , _snake_case , _snake_case ) # add model prefix if necessary lowerCAmelCase : Any = (model_prefix,) + flax_key in random_flax_state_dict if load_base_model_into_model_with_head and require_base_model_prefix: lowerCAmelCase : List[Any] = (model_prefix,) + flax_key if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( f'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape ''' f'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' ) # add batch stats if the model contains batchnorm layers if "batch_stats" in flax_model.params: if "mean" in flax_key[-1]: lowerCAmelCase : List[str] = jnp.asarray(_snake_case ) continue if "var" in flax_key[-1]: lowerCAmelCase : List[str] = jnp.asarray(_snake_case ) continue # remove num_batches_tracked key if "num_batches_tracked" in flax_key[-1]: flax_state_dict.pop(_snake_case , _snake_case ) continue # also add unexpected weight so that warning is thrown lowerCAmelCase : str = jnp.asarray(_snake_case ) else: # also add unexpected weight so that warning is thrown lowerCAmelCase : Tuple = jnp.asarray(_snake_case ) return unflatten_dict(_snake_case ) def _snake_case ( _snake_case : Optional[Any] , _snake_case : List[str] ): lowerCAmelCase : Optional[int] = os.path.abspath(_snake_case ) logger.info(f'''Loading Flax weights from {flax_checkpoint_path}''' ) # import correct flax class lowerCAmelCase : str = getattr(_snake_case , '''Flax''' + model.__class__.__name__ ) # load flax weight dict with open(_snake_case , '''rb''' ) as state_f: try: lowerCAmelCase : Any = 
from_bytes(_snake_case , state_f.read() ) except UnpicklingError: raise EnvironmentError(f'''Unable to convert {flax_checkpoint_path} to Flax deserializable object. ''' ) return load_flax_weights_in_pytorch_model(_snake_case , _snake_case ) def _snake_case ( _snake_case : Tuple , _snake_case : Dict ): try: import torch # noqa: F401 except ImportError: logger.error( '''Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see''' ''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation''' ''' instructions.''' ) raise # check if we have bf16 weights lowerCAmelCase : str = flatten_dict(jax.tree_util.tree_map(lambda _snake_case : x.dtype == jnp.bfloataa , _snake_case ) ).values() if any(_snake_case ): # convert all weights to fp32 if the are bf16 since torch.from_numpy can-not handle bf16 # and bf16 is not fully supported in PT yet. logger.warning( '''Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` ''' '''before loading those in PyTorch model.''' ) lowerCAmelCase : Union[str, Any] = jax.tree_util.tree_map( lambda _snake_case : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , _snake_case ) lowerCAmelCase : Union[str, Any] = flatten_dict(_snake_case ) lowerCAmelCase : Any = pt_model.state_dict() lowerCAmelCase : Optional[Any] = (pt_model.base_model_prefix in flax_state) and ( pt_model.base_model_prefix not in {k.split('''.''' )[0] for k in pt_model_dict.keys()} ) lowerCAmelCase : int = (pt_model.base_model_prefix not in flax_state) and ( pt_model.base_model_prefix in {k.split('''.''' )[0] for k in pt_model_dict.keys()} ) # keep track of unexpected & missing keys lowerCAmelCase : Optional[int] = [] lowerCAmelCase : Optional[Any] = set(pt_model_dict.keys() ) for flax_key_tuple, flax_tensor in flax_state_dict.items(): lowerCAmelCase : Dict = flax_key_tuple[0] == pt_model.base_model_prefix lowerCAmelCase : List[str] = 
'''.'''.join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict # adapt flax_key to prepare for loading from/to base model only if load_model_with_head_into_base_model and has_base_model_prefix: lowerCAmelCase : List[Any] = flax_key_tuple[1:] elif load_base_model_into_model_with_head and require_base_model_prefix: lowerCAmelCase : Optional[int] = (pt_model.base_model_prefix,) + flax_key_tuple # rename flax weights to PyTorch format if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(_snake_case ) not in pt_model_dict: # conv layer lowerCAmelCase : str = flax_key_tuple[:-1] + ('''weight''',) lowerCAmelCase : List[Any] = jnp.transpose(_snake_case , (3, 2, 0, 1) ) elif flax_key_tuple[-1] == "kernel" and ".".join(_snake_case ) not in pt_model_dict: # linear layer lowerCAmelCase : int = flax_key_tuple[:-1] + ('''weight''',) lowerCAmelCase : List[Any] = flax_tensor.T elif flax_key_tuple[-1] in ["scale", "embedding"]: lowerCAmelCase : List[Any] = flax_key_tuple[:-1] + ('''weight''',) # adding batch stats from flax batch norm to pt elif "mean" in flax_key_tuple[-1]: lowerCAmelCase : List[str] = flax_key_tuple[:-1] + ('''running_mean''',) elif "var" in flax_key_tuple[-1]: lowerCAmelCase : List[Any] = flax_key_tuple[:-1] + ('''running_var''',) if "batch_stats" in flax_state: lowerCAmelCase : Union[str, Any] = '''.'''.join(flax_key_tuple[1:] ) # Remove the params/batch_stats header else: lowerCAmelCase : List[Any] = '''.'''.join(_snake_case ) # We also need to look at `pt_model_dict` and see if there are keys requiring further transformation. 
lowerCAmelCase : Optional[int] = {} # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030 for key in pt_model_dict: lowerCAmelCase : str = key.split('''.''' ) lowerCAmelCase : str = None if key_components[-3::2] == ["parametrizations", "original0"]: lowerCAmelCase : Any = key_components[-2] + '''_g''' elif key_components[-3::2] == ["parametrizations", "original1"]: lowerCAmelCase : Optional[int] = key_components[-2] + '''_v''' if name is not None: lowerCAmelCase : Union[str, Any] = key_components[:-3] + [name] lowerCAmelCase : List[str] = '''.'''.join(_snake_case ) lowerCAmelCase : List[str] = key if flax_key in special_pt_names: lowerCAmelCase : Optional[int] = special_pt_names[flax_key] if flax_key in pt_model_dict: if flax_tensor.shape != pt_model_dict[flax_key].shape: raise ValueError( f'''Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected ''' f'''to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.''' ) else: # add weight to pytorch dict lowerCAmelCase : Union[str, Any] = np.asarray(_snake_case ) if not isinstance(_snake_case , np.ndarray ) else flax_tensor lowerCAmelCase : List[Any] = torch.from_numpy(_snake_case ) # remove from missing keys missing_keys.remove(_snake_case ) else: # weight is not expected by PyTorch model unexpected_keys.append(_snake_case ) pt_model.load_state_dict(_snake_case ) # re-transform missing_keys to list lowerCAmelCase : Optional[int] = list(_snake_case ) if len(_snake_case ) > 0: logger.warning( '''Some weights of the Flax model were not used when initializing the PyTorch model''' f''' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing''' f''' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture''' ''' (e.g. 
initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This''' f''' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect''' ''' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a''' ''' FlaxBertForSequenceClassification model).''' ) else: logger.warning(f'''All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n''' ) if len(_snake_case ) > 0: logger.warning( f'''Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly''' f''' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to''' ''' use it for predictions and inference.''' ) else: logger.warning( f'''All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n''' '''If your task is similar to the task the model of the checkpoint was trained on, ''' f'''you can already use {pt_model.__class__.__name__} for predictions without further training.''' ) return pt_model
60
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __a = { "configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = ["BloomTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ "BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST", "BloomForCausalLM", "BloomModel", "BloomPreTrainedModel", "BloomForSequenceClassification", "BloomForTokenClassification", "BloomForQuestionAnswering", ] if TYPE_CHECKING: from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bloom_fast import BloomTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bloom import ( BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST, BloomForCausalLM, BloomForQuestionAnswering, BloomForSequenceClassification, BloomForTokenClassification, BloomModel, BloomPreTrainedModel, ) else: import sys __a = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
35
0
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_pytesseract, require_torch from transformers.utils import is_pytesseract_available, is_torch_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_pytesseract_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class __lowerCamelCase ( unittest.TestCase ): '''simple docstring''' def __init__( self : List[str] , a_ : str , a_ : Any=7 , a_ : Any=3 , a_ : List[Any]=18 , a_ : str=30 , a_ : str=4_00 , a_ : Union[str, Any]=True , a_ : Any=None , a_ : Any=True , ): lowerCAmelCase_ : Dict = size if size is not None else {"height": 18, "width": 18} lowerCAmelCase_ : List[Any] = parent lowerCAmelCase_ : List[str] = batch_size lowerCAmelCase_ : Any = num_channels lowerCAmelCase_ : Optional[int] = image_size lowerCAmelCase_ : List[str] = min_resolution lowerCAmelCase_ : str = max_resolution lowerCAmelCase_ : Dict = do_resize lowerCAmelCase_ : List[str] = size lowerCAmelCase_ : Optional[Any] = apply_ocr def lowerCamelCase ( self : Optional[Any] ): return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr} @require_torch @require_pytesseract class __lowerCamelCase ( A__ , unittest.TestCase ): '''simple docstring''' a_ : Optional[Any] = LayoutLMvaImageProcessor if is_pytesseract_available() else None def lowerCamelCase ( self : List[Any] ): lowerCAmelCase_ : Any = LayoutLMvaImageProcessingTester(self ) @property def lowerCamelCase ( self : Dict ): return self.image_processor_tester.prepare_image_processor_dict() def lowerCamelCase ( self : Union[str, Any] ): lowerCAmelCase_ : int = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(a_ , "do_resize" ) ) self.assertTrue(hasattr(a_ , "size" ) ) self.assertTrue(hasattr(a_ , "apply_ocr" ) ) def lowerCamelCase ( self : List[Any] ): lowerCAmelCase_ : 
Tuple = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"height": 18, "width": 18} ) lowerCAmelCase_ : Any = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {"height": 42, "width": 42} ) def lowerCamelCase ( self : List[str] ): pass def lowerCamelCase ( self : Tuple ): # Initialize image_processing lowerCAmelCase_ : int = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowerCAmelCase_ : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ ) for image in image_inputs: self.assertIsInstance(a_ , Image.Image ) # Test not batched input lowerCAmelCase_ : List[Any] = image_processing(image_inputs[0] , return_tensors="pt" ) self.assertEqual( encoding.pixel_values.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) self.assertIsInstance(encoding.words , a_ ) self.assertIsInstance(encoding.boxes , a_ ) # Test batched lowerCAmelCase_ : Optional[Any] = image_processing(a_ , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) def lowerCamelCase ( self : Dict ): # Initialize image_processing lowerCAmelCase_ : Dict = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowerCAmelCase_ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , numpify=a_ ) for image in image_inputs: self.assertIsInstance(a_ , np.ndarray ) # Test not batched input lowerCAmelCase_ : Optional[int] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, 
self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) # Test batched lowerCAmelCase_ : Dict = image_processing(a_ , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) def lowerCamelCase ( self : Any ): # Initialize image_processing lowerCAmelCase_ : List[str] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowerCAmelCase_ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , torchify=a_ ) for image in image_inputs: self.assertIsInstance(a_ , torch.Tensor ) # Test not batched input lowerCAmelCase_ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) # Test batched lowerCAmelCase_ : Optional[int] = image_processing(a_ , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) def lowerCamelCase ( self : int ): # with apply_OCR = True lowerCAmelCase_ : Optional[Any] = LayoutLMvaImageProcessor() from datasets import load_dataset lowerCAmelCase_ : Union[str, Any] = load_dataset("hf-internal-testing/fixtures_docvqa" , split="test" ) lowerCAmelCase_ : List[str] = Image.open(ds[0]["file"] ).convert("RGB" ) lowerCAmelCase_ : List[str] = image_processing(a_ , return_tensors="pt" ) self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) ) self.assertEqual(len(encoding.words ) , len(encoding.boxes ) ) # fmt: off # the words and boxes were obtained with 
Tesseract 4.1.1 lowerCAmelCase_ : List[Any] = [["11:14", "to", "11:39", "a.m", "11:39", "to", "11:44", "a.m.", "11:44", "a.m.", "to", "12:25", "p.m.", "12:25", "to", "12:58", "p.m.", "12:58", "to", "4:00", "p.m.", "2:00", "to", "5:00", "p.m.", "Coffee", "Break", "Coffee", "will", "be", "served", "for", "men", "and", "women", "in", "the", "lobby", "adjacent", "to", "exhibit", "area.", "Please", "move", "into", "exhibit", "area.", "(Exhibits", "Open)", "TRRF", "GENERAL", "SESSION", "(PART", "|)", "Presiding:", "Lee", "A.", "Waller", "TRRF", "Vice", "President", "“Introductory", "Remarks”", "Lee", "A.", "Waller,", "TRRF", "Vice", "Presi-", "dent", "Individual", "Interviews", "with", "TRRF", "Public", "Board", "Members", "and", "Sci-", "entific", "Advisory", "Council", "Mem-", "bers", "Conducted", "by", "TRRF", "Treasurer", "Philip", "G.", "Kuehn", "to", "get", "answers", "which", "the", "public", "refrigerated", "warehousing", "industry", "is", "looking", "for.", "Plus", "questions", "from", "the", "floor.", "Dr.", "Emil", "M.", "Mrak,", "University", "of", "Cal-", "ifornia,", "Chairman,", "TRRF", "Board;", "Sam", "R.", "Cecil,", "University", "of", "Georgia", "College", "of", "Agriculture;", "Dr.", "Stanley", "Charm,", "Tufts", "University", "School", "of", "Medicine;", "Dr.", "Robert", "H.", "Cotton,", "ITT", "Continental", "Baking", "Company;", "Dr.", "Owen", "Fennema,", "University", "of", "Wis-", "consin;", "Dr.", "Robert", "E.", "Hardenburg,", "USDA.", "Questions", "and", "Answers", "Exhibits", "Open", "Capt.", "Jack", "Stoney", "Room", "TRRF", "Scientific", "Advisory", "Council", "Meeting", "Ballroom", "Foyer"]] # noqa: E231 lowerCAmelCase_ : int = [[[1_41, 57, 2_14, 69], [2_28, 58, 2_52, 69], [1_41, 75, 2_16, 88], [2_30, 79, 2_80, 88], [1_42, 2_60, 2_18, 2_73], [2_30, 2_61, 2_55, 2_73], [1_43, 2_79, 2_18, 2_90], [2_31, 2_82, 2_90, 2_91], [1_43, 3_42, 2_18, 3_54], [2_31, 3_45, 2_89, 3_55], [2_02, 3_62, 2_27, 3_73], [1_43, 3_79, 2_20, 3_92], [2_31, 3_82, 2_91, 
3_94], [1_44, 7_14, 2_20, 7_26], [2_31, 7_15, 2_56, 7_26], [1_44, 7_32, 2_20, 7_45], [2_32, 7_36, 2_91, 7_47], [1_44, 7_69, 2_18, 7_82], [2_31, 7_70, 2_56, 7_82], [1_41, 7_88, 2_02, 8_01], [2_15, 7_91, 2_74, 8_04], [1_43, 8_26, 2_04, 8_38], [2_15, 8_26, 2_40, 8_38], [1_42, 8_44, 2_02, 8_57], [2_15, 8_47, 2_74, 8_59], [3_34, 57, 4_27, 69], [4_40, 57, 5_22, 69], [3_69, 75, 4_61, 88], [4_69, 75, 5_16, 88], [5_28, 76, 5_62, 88], [5_70, 76, 6_67, 88], [6_75, 75, 7_11, 87], [7_21, 79, 7_78, 88], [7_89, 75, 8_40, 88], [3_69, 97, 4_70, 1_07], [4_84, 94, 5_07, 1_06], [5_18, 94, 5_62, 1_07], [5_76, 94, 6_55, 1_10], [6_68, 94, 7_92, 1_09], [8_04, 95, 8_29, 1_07], [3_69, 1_13, 4_65, 1_25], [4_77, 1_16, 5_47, 1_25], [5_62, 1_13, 6_58, 1_25], [6_71, 1_16, 7_48, 1_25], [7_61, 1_13, 8_11, 1_25], [3_69, 1_31, 4_65, 1_43], [4_77, 1_33, 5_48, 1_43], [5_63, 1_30, 6_98, 1_45], [7_10, 1_30, 8_02, 1_46], [3_36, 1_71, 4_12, 1_83], [4_23, 1_71, 5_72, 1_83], [5_82, 1_70, 7_16, 1_84], [7_28, 1_71, 8_17, 1_87], [8_29, 1_71, 8_44, 1_86], [3_38, 1_97, 4_82, 2_12], [5_07, 1_96, 5_57, 2_09], [5_69, 1_96, 5_95, 2_08], [6_10, 1_96, 7_02, 2_09], [5_05, 2_14, 5_83, 2_26], [5_95, 2_14, 6_56, 2_27], [6_70, 2_15, 8_07, 2_27], [3_35, 2_59, 5_43, 2_74], [5_56, 2_59, 7_08, 2_72], [3_72, 2_79, 4_22, 2_91], [4_35, 2_79, 4_60, 2_91], [4_74, 2_79, 5_74, 2_92], [5_87, 2_78, 6_64, 2_91], [6_76, 2_78, 7_38, 2_91], [7_51, 2_79, 8_34, 2_91], [3_72, 2_98, 4_34, 3_10], [3_35, 3_41, 4_83, 3_54], [4_97, 3_41, 6_55, 3_54], [6_67, 3_41, 7_28, 3_54], [7_40, 3_41, 8_25, 3_54], [3_35, 3_60, 4_30, 3_72], [4_42, 3_60, 5_34, 3_72], [5_45, 3_59, 6_87, 3_72], [6_97, 3_60, 7_54, 3_72], [7_65, 3_60, 8_23, 3_73], [3_34, 3_78, 4_28, 3_91], [4_40, 3_78, 5_77, 3_94], [5_90, 3_78, 7_05, 3_91], [7_20, 3_78, 8_01, 3_91], [3_34, 3_97, 4_00, 4_09], [3_70, 4_16, 5_29, 4_29], [5_44, 4_16, 5_76, 4_32], [5_87, 4_16, 6_65, 4_28], [6_77, 4_16, 8_14, 4_29], [3_72, 4_35, 4_52, 4_50], [4_65, 4_34, 4_95, 4_47], [5_11, 4_34, 6_00, 4_47], [6_11, 4_36, 
6_37, 4_47], [6_49, 4_36, 6_94, 4_51], [7_05, 4_38, 8_24, 4_47], [3_69, 4_53, 4_52, 4_66], [4_64, 4_54, 5_09, 4_66], [5_22, 4_53, 6_11, 4_69], [6_25, 4_53, 7_92, 4_69], [3_70, 4_72, 5_56, 4_88], [5_70, 4_72, 6_84, 4_87], [6_97, 4_72, 7_18, 4_85], [7_32, 4_72, 8_35, 4_88], [3_69, 4_90, 4_11, 5_03], [4_25, 4_90, 4_84, 5_03], [4_96, 4_90, 6_35, 5_06], [6_45, 4_90, 7_07, 5_03], [7_18, 4_91, 7_61, 5_03], [7_71, 4_90, 8_40, 5_03], [3_36, 5_10, 3_74, 5_21], [3_88, 5_10, 4_47, 5_22], [4_60, 5_10, 4_89, 5_21], [5_03, 5_10, 5_80, 5_22], [5_92, 5_09, 7_36, 5_25], [7_45, 5_09, 7_70, 5_22], [7_81, 5_09, 8_40, 5_22], [3_38, 5_28, 4_34, 5_41], [4_48, 5_28, 5_96, 5_41], [6_09, 5_27, 6_87, 5_40], [7_00, 5_28, 7_92, 5_41], [3_36, 5_46, 3_97, 5_59], [4_07, 5_46, 4_31, 5_59], [4_43, 5_46, 5_25, 5_60], [5_37, 5_46, 6_80, 5_62], [6_88, 5_46, 7_14, 5_59], [7_22, 5_46, 8_37, 5_62], [3_36, 5_65, 4_49, 5_81], [4_61, 5_65, 4_85, 5_77], [4_97, 5_65, 6_65, 5_81], [6_81, 5_65, 7_18, 5_77], [7_32, 5_65, 8_37, 5_80], [3_37, 5_84, 4_38, 5_97], [4_52, 5_83, 5_21, 5_96], [5_35, 5_84, 6_77, 5_99], [6_90, 5_83, 7_87, 5_96], [8_01, 5_83, 8_25, 5_96], [3_38, 6_02, 4_78, 6_15], [4_92, 6_02, 5_30, 6_14], [5_43, 6_02, 6_38, 6_15], [6_50, 6_02, 6_76, 6_14], [6_88, 6_02, 7_88, 6_15], [8_02, 6_02, 8_43, 6_14], [3_37, 6_21, 5_02, 6_33], [5_16, 6_21, 6_15, 6_37], [6_29, 6_21, 7_74, 6_36], [7_89, 6_21, 8_27, 6_33], [3_37, 6_39, 4_18, 6_52], [4_32, 6_40, 5_71, 6_53], [5_87, 6_39, 7_31, 6_55], [7_43, 6_39, 7_69, 6_52], [7_80, 6_39, 8_41, 6_52], [3_38, 6_58, 4_40, 6_73], [4_55, 6_58, 4_91, 6_70], [5_08, 6_58, 6_02, 6_71], [6_16, 6_58, 6_38, 6_70], [6_54, 6_58, 8_35, 6_74], [3_37, 6_77, 4_29, 6_89], [3_37, 7_14, 4_82, 7_26], [4_95, 7_14, 5_48, 7_26], [5_61, 7_14, 6_83, 7_26], [3_38, 7_70, 4_61, 7_82], [4_74, 7_69, 5_54, 7_85], [4_89, 7_88, 5_62, 8_03], [5_76, 7_88, 6_43, 8_01], [6_56, 7_87, 7_51, 8_04], [7_64, 7_88, 8_44, 8_01], [3_34, 8_25, 4_21, 8_38], [4_30, 8_24, 5_74, 8_38], [5_84, 8_24, 7_23, 8_41], [3_35, 
8_44, 4_50, 8_57], [4_64, 8_43, 5_83, 8_60], [6_28, 8_62, 7_55, 8_75], [7_69, 8_61, 8_48, 8_78]]] # noqa: E231 # fmt: on self.assertListEqual(encoding.words , a_ ) self.assertListEqual(encoding.boxes , a_ ) # with apply_OCR = False lowerCAmelCase_ : Dict = LayoutLMvaImageProcessor(apply_ocr=a_ ) lowerCAmelCase_ : Tuple = image_processing(a_ , return_tensors="pt" ) self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) )
161
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MobileNetVaImageProcessor class __lowerCamelCase ( unittest.TestCase ): '''simple docstring''' def __init__( self : List[str] , a_ : List[str] , a_ : Tuple=7 , a_ : Any=3 , a_ : Union[str, Any]=18 , a_ : List[str]=30 , a_ : List[str]=4_00 , a_ : str=True , a_ : Tuple=None , a_ : str=True , a_ : Optional[int]=None , ): lowerCAmelCase_ : Any = size if size is not None else {"shortest_edge": 20} lowerCAmelCase_ : Any = crop_size if crop_size is not None else {"height": 18, "width": 18} lowerCAmelCase_ : int = parent lowerCAmelCase_ : Dict = batch_size lowerCAmelCase_ : Any = num_channels lowerCAmelCase_ : str = image_size lowerCAmelCase_ : int = min_resolution lowerCAmelCase_ : Tuple = max_resolution lowerCAmelCase_ : str = do_resize lowerCAmelCase_ : List[Any] = size lowerCAmelCase_ : Any = do_center_crop lowerCAmelCase_ : Tuple = crop_size def lowerCamelCase ( self : List[str] ): return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, } @require_torch @require_vision class __lowerCamelCase ( A__ , unittest.TestCase ): '''simple docstring''' a_ : Optional[Any] = MobileNetVaImageProcessor if is_vision_available() else None def lowerCamelCase ( self : Optional[int] ): lowerCAmelCase_ : int = MobileNetVaImageProcessingTester(self ) @property def lowerCamelCase ( self : Optional[Any] ): return self.image_processor_tester.prepare_image_processor_dict() def lowerCamelCase ( self : Tuple ): lowerCAmelCase_ : str = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(a_ , 
"do_resize" ) ) self.assertTrue(hasattr(a_ , "size" ) ) self.assertTrue(hasattr(a_ , "do_center_crop" ) ) self.assertTrue(hasattr(a_ , "crop_size" ) ) def lowerCamelCase ( self : Tuple ): lowerCAmelCase_ : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"shortest_edge": 20} ) self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18} ) lowerCAmelCase_ : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {"shortest_edge": 42} ) self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} ) def lowerCamelCase ( self : Tuple ): pass def lowerCamelCase ( self : Any ): # Initialize image_processing lowerCAmelCase_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowerCAmelCase_ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ ) for image in image_inputs: self.assertIsInstance(a_ , Image.Image ) # Test not batched input lowerCAmelCase_ : str = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) # Test batched lowerCAmelCase_ : List[str] = image_processing(a_ , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) def lowerCamelCase ( self : str ): # Initialize image_processing lowerCAmelCase_ : Dict = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowerCAmelCase_ : List[Any] = prepare_image_inputs(self.image_processor_tester , 
equal_resolution=a_ , numpify=a_ ) for image in image_inputs: self.assertIsInstance(a_ , np.ndarray ) # Test not batched input lowerCAmelCase_ : Tuple = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) # Test batched lowerCAmelCase_ : Dict = image_processing(a_ , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) def lowerCamelCase ( self : Union[str, Any] ): # Initialize image_processing lowerCAmelCase_ : str = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowerCAmelCase_ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , torchify=a_ ) for image in image_inputs: self.assertIsInstance(a_ , torch.Tensor ) # Test not batched input lowerCAmelCase_ : int = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) # Test batched lowerCAmelCase_ : str = image_processing(a_ , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , )
161
1
"""simple docstring""" import unittest import numpy as np import requests from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11 else: UpperCAmelCase__ = False if is_vision_available(): from PIL import Image from transformers import PixaStructImageProcessor class a ( unittest.TestCase ): def __init__( self : Optional[int] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Dict=7 , __lowerCAmelCase : List[Any]=3 , __lowerCAmelCase : Tuple=18 , __lowerCAmelCase : Optional[int]=30 , __lowerCAmelCase : int=400 , __lowerCAmelCase : Optional[int]=None , __lowerCAmelCase : Dict=True , __lowerCAmelCase : Optional[int]=True , __lowerCAmelCase : int=None , ): _UpperCAmelCase = size if size is not None else {"""height""": 20, """width""": 20} _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = num_channels _UpperCAmelCase = image_size _UpperCAmelCase = min_resolution _UpperCAmelCase = max_resolution _UpperCAmelCase = size _UpperCAmelCase = do_normalize _UpperCAmelCase = do_convert_rgb _UpperCAmelCase = [512, 1024, 2048, 4096] _UpperCAmelCase = patch_size if patch_size is not None else {"""height""": 16, """width""": 16} def lowerCAmelCase_ ( self : Optional[int] ): return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb} def lowerCAmelCase_ ( self : List[Any] ): _UpperCAmelCase = """https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg""" _UpperCAmelCase = Image.open(requests.get(__lowerCAmelCase , stream=__lowerCAmelCase ).raw ).convert("""RGB""" ) return raw_image @unittest.skipIf( not is_torch_greater_or_equal_than_1_11 , reason='`Pix2StructImageProcessor` requires `torch>=1.11.0`.' 
, ) @require_torch @require_vision class a ( lowerCAmelCase_ , unittest.TestCase ): _snake_case : Tuple = PixaStructImageProcessor if is_vision_available() else None def lowerCAmelCase_ ( self : Dict ): _UpperCAmelCase = PixaStructImageProcessingTester(self ) @property def lowerCAmelCase_ ( self : List[str] ): return self.image_processor_tester.prepare_image_processor_dict() def lowerCAmelCase_ ( self : List[str] ): _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__lowerCAmelCase , """do_normalize""" ) ) self.assertTrue(hasattr(__lowerCAmelCase , """do_convert_rgb""" ) ) def lowerCAmelCase_ ( self : List[Any] ): _UpperCAmelCase = self.image_processor_tester.prepare_dummy_image() _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) _UpperCAmelCase = 2048 _UpperCAmelCase = image_processor(__lowerCAmelCase , return_tensors="""pt""" , max_patches=__lowerCAmelCase ) self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0_606 ) , atol=1e-3 , rtol=1e-3 ) ) def lowerCAmelCase_ ( self : Optional[int] ): # Initialize image_processor _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(__lowerCAmelCase , Image.Image ) # Test not batched input _UpperCAmelCase = ( (self.image_processor_tester.patch_size["""height"""] * self.image_processor_tester.patch_size["""width"""]) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input _UpperCAmelCase = image_processor( image_inputs[0] , return_tensors="""pt""" , max_patches=__lowerCAmelCase ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched _UpperCAmelCase = image_processor( __lowerCAmelCase , 
return_tensors="""pt""" , max_patches=__lowerCAmelCase ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) def lowerCAmelCase_ ( self : Optional[Any] ): # Initialize image_processor _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(__lowerCAmelCase , Image.Image ) # Test not batched input _UpperCAmelCase = ( (self.image_processor_tester.patch_size["""height"""] * self.image_processor_tester.patch_size["""width"""]) * self.image_processor_tester.num_channels ) + 2 _UpperCAmelCase = True for max_patch in self.image_processor_tester.max_patches: # Test not batched input with self.assertRaises(__lowerCAmelCase ): _UpperCAmelCase = image_processor( image_inputs[0] , return_tensors="""pt""" , max_patches=__lowerCAmelCase ).flattened_patches _UpperCAmelCase = """Hello""" _UpperCAmelCase = image_processor( image_inputs[0] , return_tensors="""pt""" , max_patches=__lowerCAmelCase , header_text=__lowerCAmelCase ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched _UpperCAmelCase = image_processor( __lowerCAmelCase , return_tensors="""pt""" , max_patches=__lowerCAmelCase , header_text=__lowerCAmelCase ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) def lowerCAmelCase_ ( self : Union[str, Any] ): # Initialize image_processor _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , numpify=__lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(__lowerCAmelCase , np.ndarray ) 
_UpperCAmelCase = ( (self.image_processor_tester.patch_size["""height"""] * self.image_processor_tester.patch_size["""width"""]) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input _UpperCAmelCase = image_processor( image_inputs[0] , return_tensors="""pt""" , max_patches=__lowerCAmelCase ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched _UpperCAmelCase = image_processor( __lowerCAmelCase , return_tensors="""pt""" , max_patches=__lowerCAmelCase ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) def lowerCAmelCase_ ( self : Dict ): # Initialize image_processor _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , torchify=__lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(__lowerCAmelCase , torch.Tensor ) # Test not batched input _UpperCAmelCase = ( (self.image_processor_tester.patch_size["""height"""] * self.image_processor_tester.patch_size["""width"""]) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input _UpperCAmelCase = image_processor( image_inputs[0] , return_tensors="""pt""" , max_patches=__lowerCAmelCase ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched _UpperCAmelCase = image_processor( __lowerCAmelCase , return_tensors="""pt""" , max_patches=__lowerCAmelCase ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) @unittest.skipIf( not is_torch_greater_or_equal_than_1_11 , reason='`Pix2StructImageProcessor` requires 
`torch>=1.11.0`.' , ) @require_torch @require_vision class a ( lowerCAmelCase_ , unittest.TestCase ): _snake_case : Union[str, Any] = PixaStructImageProcessor if is_vision_available() else None def lowerCAmelCase_ ( self : List[str] ): _UpperCAmelCase = PixaStructImageProcessingTester(self , num_channels=4 ) _UpperCAmelCase = 3 @property def lowerCAmelCase_ ( self : List[Any] ): return self.image_processor_tester.prepare_image_processor_dict() def lowerCAmelCase_ ( self : List[str] ): _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__lowerCAmelCase , """do_normalize""" ) ) self.assertTrue(hasattr(__lowerCAmelCase , """do_convert_rgb""" ) ) def lowerCAmelCase_ ( self : Any ): # Initialize image_processor _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(__lowerCAmelCase , Image.Image ) # Test not batched input _UpperCAmelCase = ( (self.image_processor_tester.patch_size["""height"""] * self.image_processor_tester.patch_size["""width"""]) * (self.image_processor_tester.num_channels - 1) ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input _UpperCAmelCase = image_processor( image_inputs[0] , return_tensors="""pt""" , max_patches=__lowerCAmelCase ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched _UpperCAmelCase = image_processor( __lowerCAmelCase , return_tensors="""pt""" , max_patches=__lowerCAmelCase ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
289
"""simple docstring""" import math class a : def lowerCAmelCase_ ( self : Tuple , __lowerCAmelCase : list[list[float]] , __lowerCAmelCase : list[int] ): _UpperCAmelCase = 0.0 _UpperCAmelCase = 0.0 for i in range(len(__lowerCAmelCase ) ): da += math.pow((sample[i] - weights[0][i]) , 2 ) da += math.pow((sample[i] - weights[1][i]) , 2 ) return 0 if da > da else 1 return 0 def lowerCAmelCase_ ( self : Union[str, Any] , __lowerCAmelCase : list[list[int | float]] , __lowerCAmelCase : list[int] , __lowerCAmelCase : int , __lowerCAmelCase : float ): for i in range(len(__lowerCAmelCase ) ): weights[j][i] += alpha * (sample[i] - weights[j][i]) return weights def __UpperCAmelCase ( ): """simple docstring""" # Training Examples ( m, n ) _UpperCAmelCase = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]] # weight initialization ( n, C ) _UpperCAmelCase = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]] # training _UpperCAmelCase = SelfOrganizingMap() _UpperCAmelCase = 3 _UpperCAmelCase = 0.5 for _ in range(lowercase ): for j in range(len(lowercase ) ): # training sample _UpperCAmelCase = training_samples[j] # Compute the winning vector _UpperCAmelCase = self_organizing_map.get_winner(lowercase ,lowercase ) # Update the winning vector _UpperCAmelCase = self_organizing_map.update(lowercase ,lowercase ,lowercase ,lowercase ) # classify test sample _UpperCAmelCase = [0, 0, 0, 1] _UpperCAmelCase = self_organizing_map.get_winner(lowercase ,lowercase ) # results print(f'''Clusters that the test sample belongs to : {winner}''' ) print(f'''Weights that have been trained : {weights}''' ) # running the main() function if __name__ == "__main__": main()
289
1
"""simple docstring""" from __future__ import annotations from dataclasses import dataclass @dataclass class UpperCAmelCase__ : """simple docstring""" UpperCAmelCase__ : float UpperCAmelCase__ : TreeNode | None = None UpperCAmelCase__ : TreeNode | None = None def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : TreeNode | None ): # Validation def is_valid_tree(SCREAMING_SNAKE_CASE__ : TreeNode | None ) -> bool: if node is None: return True if not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): return False try: float(node.data ) except (TypeError, ValueError): return False return is_valid_tree(node.left ) and is_valid_tree(node.right ) if not is_valid_tree(SCREAMING_SNAKE_CASE__ ): raise ValueError( 'Each node should be type of TreeNode and data should be float.' ) def is_binary_search_tree_recursive_check( SCREAMING_SNAKE_CASE__ : TreeNode | None , SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : float ) -> bool: if node is None: return True return ( left_bound < node.data < right_bound and is_binary_search_tree_recursive_check(node.left , SCREAMING_SNAKE_CASE__ , node.data ) and is_binary_search_tree_recursive_check( node.right , node.data , SCREAMING_SNAKE_CASE__ ) ) return is_binary_search_tree_recursive_check(SCREAMING_SNAKE_CASE__ , -float('inf' ) , float('inf' ) ) if __name__ == "__main__": import doctest doctest.testmod()
354
import logging from pathlib import Path import numpy as np import pytorch_lightning as pl import torch from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint from pytorch_lightning.utilities import rank_zero_only from utils_rag import save_json def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Tuple ): __UpperCamelCase =filter(lambda SCREAMING_SNAKE_CASE__ : p.requires_grad , model.parameters() ) __UpperCamelCase =sum([np.prod(p.size() ) for p in model_parameters] ) return params _A = logging.getLogger(__name__) def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[Any] ): if metric == "rouge2": __UpperCamelCase ='{val_avg_rouge2:.4f}-{step_count}' elif metric == "bleu": __UpperCamelCase ='{val_avg_bleu:.4f}-{step_count}' elif metric == "em": __UpperCamelCase ='{val_avg_em:.4f}-{step_count}' elif metric == "loss": __UpperCamelCase ='{val_avg_loss:.4f}-{step_count}' else: raise NotImplementedError( F'seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this' ' function.' 
) __UpperCamelCase =ModelCheckpoint( dirpath=SCREAMING_SNAKE_CASE__ , filename=SCREAMING_SNAKE_CASE__ , monitor=F'val_{metric}' , mode='max' , save_top_k=1 , every_n_epochs=1 , ) return checkpoint_callback def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Union[str, Any] ): return EarlyStopping( monitor=F'val_{metric}' , mode='min' if 'loss' in metric else 'max' , patience=SCREAMING_SNAKE_CASE__ , verbose=SCREAMING_SNAKE_CASE__ , ) class UpperCAmelCase__ ( pl.Callback ): """simple docstring""" def _a ( self , A_ , A_ ) -> int: __UpperCamelCase ={f'lr_group_{i}': param['lr'] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )} pl_module.logger.log_metrics(A_ ) @rank_zero_only def _a ( self , A_ , A_ , A_ , A_=True ) -> None: logger.info(f'***** {type_path} results at step {trainer.global_step:05d} *****' ) __UpperCamelCase =trainer.callback_metrics trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['log', 'progress_bar', 'preds']} ) # Log results __UpperCamelCase =Path(pl_module.hparams.output_dir ) if type_path == "test": __UpperCamelCase =od / 'test_results.txt' __UpperCamelCase =od / 'test_generations.txt' else: # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json # If people want this it will be easy enough to add back. 
__UpperCamelCase =od / f'{type_path}_results/{trainer.global_step:05d}.txt' __UpperCamelCase =od / f'{type_path}_generations/{trainer.global_step:05d}.txt' results_file.parent.mkdir(exist_ok=A_ ) generations_file.parent.mkdir(exist_ok=A_ ) with open(A_ , 'a+' ) as writer: for key in sorted(A_ ): if key in ["log", "progress_bar", "preds"]: continue __UpperCamelCase =metrics[key] if isinstance(A_ , torch.Tensor ): __UpperCamelCase =val.item() __UpperCamelCase =f'{key}: {val:.6f}\n' writer.write(A_ ) if not save_generations: return if "preds" in metrics: __UpperCamelCase ='\n'.join(metrics['preds'] ) generations_file.open('w+' ).write(A_ ) @rank_zero_only def _a ( self , A_ , A_ ) -> Optional[int]: try: __UpperCamelCase =pl_module.model.model.num_parameters() except AttributeError: __UpperCamelCase =pl_module.model.num_parameters() __UpperCamelCase =count_trainable_parameters(A_ ) # mp stands for million parameters trainer.logger.log_metrics({'n_params': npars, 'mp': npars / 1E6, 'grad_mp': n_trainable_pars / 1E6} ) @rank_zero_only def _a ( self , A_ , A_ ) -> List[str]: save_json(pl_module.metrics , pl_module.metrics_save_path ) return self._write_logs(A_ , A_ , 'test' ) @rank_zero_only def _a ( self , A_ , A_ ) -> List[str]: save_json(pl_module.metrics , pl_module.metrics_save_path ) # Uncommenting this will save val generations # return self._write_logs(trainer, pl_module, "valid")
117
0
"""simple docstring""" def lowercase ( A_ )-> int: '''simple docstring''' a : int = hex_num.strip() if not hex_num: raise ValueError("No value was passed to the function" ) a : str = hex_num[0] == "-" if is_negative: a : str = hex_num[1:] try: a : int = int(A_ , 16 ) except ValueError: raise ValueError("Invalid value was passed to the function" ) a : str = "" while int_num > 0: a : List[Any] = str(int_num % 2 ) + bin_str int_num >>= 1 return int(("-" + bin_str) if is_negative else bin_str ) if __name__ == "__main__": import doctest doctest.testmod()
40
"""simple docstring""" def lowercase ( A_ , A_ )-> float: '''simple docstring''' if mass < 0: raise ValueError("The mass of a body cannot be negative" ) return 0.5 * mass * abs(A_ ) * abs(A_ ) if __name__ == "__main__": import doctest doctest.testmod(verbose=True)
40
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _a = { 'configuration_clap': [ 'CLAP_PRETRAINED_MODEL_ARCHIVE_LIST', 'ClapAudioConfig', 'ClapConfig', 'ClapTextConfig', ], 'processing_clap': ['ClapProcessor'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a = [ 'CLAP_PRETRAINED_MODEL_ARCHIVE_LIST', 'ClapModel', 'ClapPreTrainedModel', 'ClapTextModel', 'ClapTextModelWithProjection', 'ClapAudioModel', 'ClapAudioModelWithProjection', ] _a = ['ClapFeatureExtractor'] if TYPE_CHECKING: from .configuration_clap import ( CLAP_PRETRAINED_MODEL_ARCHIVE_LIST, ClapAudioConfig, ClapConfig, ClapTextConfig, ) from .processing_clap import ClapProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_clap import ClapFeatureExtractor from .modeling_clap import ( CLAP_PRETRAINED_MODEL_ARCHIVE_LIST, ClapAudioModel, ClapAudioModelWithProjection, ClapModel, ClapPreTrainedModel, ClapTextModel, ClapTextModelWithProjection, ) else: import sys _a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
23
"""simple docstring""" import tempfile import torch from diffusers import PNDMScheduler from .test_schedulers import SchedulerCommonTest class A_ (lowercase__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : str = (PNDMScheduler,) SCREAMING_SNAKE_CASE__ : str = (("""num_inference_steps""", 50),) def UpperCamelCase__ ( self , **lowercase_ ): """simple docstring""" UpperCAmelCase_ : int = { "num_train_timesteps": 1000, "beta_start": 0.00_01, "beta_end": 0.02, "beta_schedule": "linear", } config.update(**lowercase_ ) return config def UpperCamelCase__ ( self , lowercase_=0 , **lowercase_ ): """simple docstring""" UpperCAmelCase_ : str = dict(self.forward_default_kwargs ) UpperCAmelCase_ : List[str] = kwargs.pop("num_inference_steps" , lowercase_ ) UpperCAmelCase_ : Union[str, Any] = self.dummy_sample UpperCAmelCase_ : Dict = 0.1 * sample UpperCAmelCase_ : Dict = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: UpperCAmelCase_ : List[Any] = self.get_scheduler_config(**lowercase_ ) UpperCAmelCase_ : Dict = scheduler_class(**lowercase_ ) scheduler.set_timesteps(lowercase_ ) # copy over dummy past residuals UpperCAmelCase_ : List[Any] = dummy_past_residuals[:] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(lowercase_ ) UpperCAmelCase_ : Optional[int] = scheduler_class.from_pretrained(lowercase_ ) new_scheduler.set_timesteps(lowercase_ ) # copy over dummy past residuals UpperCAmelCase_ : int = dummy_past_residuals[:] UpperCAmelCase_ : List[str] = scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample UpperCAmelCase_ : str = new_scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" UpperCAmelCase_ : Optional[int] = scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample UpperCAmelCase_ : Dict = 
new_scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def UpperCamelCase__ ( self ): """simple docstring""" pass def UpperCamelCase__ ( self , lowercase_=0 , **lowercase_ ): """simple docstring""" UpperCAmelCase_ : List[Any] = dict(self.forward_default_kwargs ) UpperCAmelCase_ : str = kwargs.pop("num_inference_steps" , lowercase_ ) UpperCAmelCase_ : Optional[int] = self.dummy_sample UpperCAmelCase_ : List[str] = 0.1 * sample UpperCAmelCase_ : Tuple = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: UpperCAmelCase_ : str = self.get_scheduler_config() UpperCAmelCase_ : Dict = scheduler_class(**lowercase_ ) scheduler.set_timesteps(lowercase_ ) # copy over dummy past residuals (must be after setting timesteps) UpperCAmelCase_ : List[Any] = dummy_past_residuals[:] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(lowercase_ ) UpperCAmelCase_ : Dict = scheduler_class.from_pretrained(lowercase_ ) # copy over dummy past residuals new_scheduler.set_timesteps(lowercase_ ) # copy over dummy past residual (must be after setting timesteps) UpperCAmelCase_ : Optional[Any] = dummy_past_residuals[:] UpperCAmelCase_ : Union[str, Any] = scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample UpperCAmelCase_ : Dict = new_scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" UpperCAmelCase_ : List[str] = scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample UpperCAmelCase_ : int = new_scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def 
UpperCamelCase__ ( self , **lowercase_ ): """simple docstring""" UpperCAmelCase_ : str = self.scheduler_classes[0] UpperCAmelCase_ : Union[str, Any] = self.get_scheduler_config(**lowercase_ ) UpperCAmelCase_ : List[Any] = scheduler_class(**lowercase_ ) UpperCAmelCase_ : Tuple = 10 UpperCAmelCase_ : List[str] = self.dummy_model() UpperCAmelCase_ : str = self.dummy_sample_deter scheduler.set_timesteps(lowercase_ ) for i, t in enumerate(scheduler.prk_timesteps ): UpperCAmelCase_ : Tuple = model(lowercase_ , lowercase_ ) UpperCAmelCase_ : Optional[int] = scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ ).prev_sample for i, t in enumerate(scheduler.plms_timesteps ): UpperCAmelCase_ : Any = model(lowercase_ , lowercase_ ) UpperCAmelCase_ : Optional[Any] = scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ ).prev_sample return sample def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : str = dict(self.forward_default_kwargs ) UpperCAmelCase_ : Optional[Any] = kwargs.pop("num_inference_steps" , lowercase_ ) for scheduler_class in self.scheduler_classes: UpperCAmelCase_ : Any = self.get_scheduler_config() UpperCAmelCase_ : Tuple = scheduler_class(**lowercase_ ) UpperCAmelCase_ : str = self.dummy_sample UpperCAmelCase_ : List[Any] = 0.1 * sample if num_inference_steps is not None and hasattr(lowercase_ , "set_timesteps" ): scheduler.set_timesteps(lowercase_ ) elif num_inference_steps is not None and not hasattr(lowercase_ , "set_timesteps" ): UpperCAmelCase_ : List[Any] = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) UpperCAmelCase_ : List[str] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] UpperCAmelCase_ : List[str] = dummy_past_residuals[:] UpperCAmelCase_ : str = scheduler.step_prk(lowercase_ , 0 , lowercase_ , **lowercase_ ).prev_sample UpperCAmelCase_ : Any = scheduler.step_prk(lowercase_ , 1 , lowercase_ , **lowercase_ ).prev_sample self.assertEqual(output_a.shape , 
sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) UpperCAmelCase_ : Optional[Any] = scheduler.step_plms(lowercase_ , 0 , lowercase_ , **lowercase_ ).prev_sample UpperCAmelCase_ : Optional[Any] = scheduler.step_plms(lowercase_ , 1 , lowercase_ , **lowercase_ ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def UpperCamelCase__ ( self ): """simple docstring""" for timesteps in [100, 1000]: self.check_over_configs(num_train_timesteps=lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" for steps_offset in [0, 1]: self.check_over_configs(steps_offset=lowercase_ ) UpperCAmelCase_ : Optional[int] = self.scheduler_classes[0] UpperCAmelCase_ : int = self.get_scheduler_config(steps_offset=1 ) UpperCAmelCase_ : Optional[Any] = scheduler_class(**lowercase_ ) scheduler.set_timesteps(10 ) assert torch.equal( scheduler.timesteps , torch.LongTensor( [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1] ) , ) def UpperCamelCase__ ( self ): """simple docstring""" for beta_start, beta_end in zip([0.00_01, 0.0_01] , [0.0_02, 0.02] ): self.check_over_configs(beta_start=lowercase_ , beta_end=lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" for t in [1, 5, 10]: self.check_over_forward(time_step=lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ): self.check_over_forward(num_inference_steps=lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" # earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power 
of 3 UpperCAmelCase_ : List[Any] = 27 for scheduler_class in self.scheduler_classes: UpperCAmelCase_ : List[Any] = self.dummy_sample UpperCAmelCase_ : Optional[int] = 0.1 * sample UpperCAmelCase_ : List[str] = self.get_scheduler_config() UpperCAmelCase_ : List[str] = scheduler_class(**lowercase_ ) scheduler.set_timesteps(lowercase_ ) # before power of 3 fix, would error on first step, so we only need to do two for i, t in enumerate(scheduler.prk_timesteps[:2] ): UpperCAmelCase_ : List[str] = scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ ).prev_sample def UpperCamelCase__ ( self ): """simple docstring""" with self.assertRaises(lowercase_ ): UpperCAmelCase_ : List[str] = self.scheduler_classes[0] UpperCAmelCase_ : str = self.get_scheduler_config() UpperCAmelCase_ : Tuple = scheduler_class(**lowercase_ ) scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : List[Any] = self.full_loop() UpperCAmelCase_ : Any = torch.sum(torch.abs(lowercase_ ) ) UpperCAmelCase_ : Dict = torch.mean(torch.abs(lowercase_ ) ) assert abs(result_sum.item() - 1_98.13_18 ) < 1E-2 assert abs(result_mean.item() - 0.25_80 ) < 1E-3 def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Union[str, Any] = self.full_loop(prediction_type="v_prediction" ) UpperCAmelCase_ : str = torch.sum(torch.abs(lowercase_ ) ) UpperCAmelCase_ : Tuple = torch.mean(torch.abs(lowercase_ ) ) assert abs(result_sum.item() - 67.39_86 ) < 1E-2 assert abs(result_mean.item() - 0.08_78 ) < 1E-3 def UpperCamelCase__ ( self ): """simple docstring""" # We specify different beta, so that the first alpha is 0.99 UpperCAmelCase_ : Union[str, Any] = self.full_loop(set_alpha_to_one=lowercase_ , beta_start=0.01 ) UpperCAmelCase_ : List[Any] = torch.sum(torch.abs(lowercase_ ) ) UpperCAmelCase_ : int = torch.mean(torch.abs(lowercase_ ) ) assert abs(result_sum.item() - 2_30.03_99 ) < 1E-2 assert abs(result_mean.item() - 
0.29_95 ) < 1E-3 def UpperCamelCase__ ( self ): """simple docstring""" # We specify different beta, so that the first alpha is 0.99 UpperCAmelCase_ : Tuple = self.full_loop(set_alpha_to_one=lowercase_ , beta_start=0.01 ) UpperCAmelCase_ : int = torch.sum(torch.abs(lowercase_ ) ) UpperCAmelCase_ : Tuple = torch.mean(torch.abs(lowercase_ ) ) assert abs(result_sum.item() - 1_86.94_82 ) < 1E-2 assert abs(result_mean.item() - 0.24_34 ) < 1E-3
23
1
import json import sys import tempfile import unittest from pathlib import Path import transformers from transformers import ( CONFIG_MAPPING, IMAGE_PROCESSOR_MAPPING, AutoConfig, AutoImageProcessor, CLIPConfig, CLIPImageProcessor, ) from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils')) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_image_processing import CustomImageProcessor # noqa E402 class A ( unittest.TestCase ): '''simple docstring''' def lowerCamelCase__ (self : int ) -> Tuple: """simple docstring""" lowercase__ = 0 def lowerCamelCase__ (self : Union[str, Any] ) -> Tuple: """simple docstring""" lowercase__ = AutoImageProcessor.from_pretrained("""openai/clip-vit-base-patch32""" ) self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase ) def lowerCamelCase__ (self : int ) -> int: """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: lowercase__ = Path(_UpperCAmelCase ) / """preprocessor_config.json""" lowercase__ = Path(_UpperCAmelCase ) / """config.json""" json.dump( {"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(_UpperCAmelCase , """w""" ) , ) json.dump({"""model_type""": """clip"""} , open(_UpperCAmelCase , """w""" ) ) lowercase__ = AutoImageProcessor.from_pretrained(_UpperCAmelCase ) self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase ) def lowerCamelCase__ (self : Union[str, Any] ) -> Tuple: """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: lowercase__ = Path(_UpperCAmelCase ) / """preprocessor_config.json""" lowercase__ = Path(_UpperCAmelCase ) / """config.json""" json.dump( {"""feature_extractor_type""": """CLIPFeatureExtractor""", """processor_class""": """CLIPProcessor"""} , open(_UpperCAmelCase , """w""" ) , ) json.dump({"""model_type""": """clip"""} , open(_UpperCAmelCase , """w""" ) ) lowercase__ = 
AutoImageProcessor.from_pretrained(_UpperCAmelCase ) self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase ) def lowerCamelCase__ (self : List[str] ) -> List[str]: """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: lowercase__ = CLIPConfig() # Create a dummy config file with image_proceesor_type lowercase__ = Path(_UpperCAmelCase ) / """preprocessor_config.json""" lowercase__ = Path(_UpperCAmelCase ) / """config.json""" json.dump( {"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(_UpperCAmelCase , """w""" ) , ) json.dump({"""model_type""": """clip"""} , open(_UpperCAmelCase , """w""" ) ) # remove image_processor_type to make sure config.json alone is enough to load image processor locally lowercase__ = AutoImageProcessor.from_pretrained(_UpperCAmelCase ).to_dict() config_dict.pop("""image_processor_type""" ) lowercase__ = CLIPImageProcessor(**_UpperCAmelCase ) # save in new folder model_config.save_pretrained(_UpperCAmelCase ) config.save_pretrained(_UpperCAmelCase ) lowercase__ = AutoImageProcessor.from_pretrained(_UpperCAmelCase ) # make sure private variable is not incorrectly saved lowercase__ = json.loads(config.to_json_string() ) self.assertTrue("""_processor_class""" not in dict_as_saved ) self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase ) def lowerCamelCase__ (self : Optional[int] ) -> Any: """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: lowercase__ = Path(_UpperCAmelCase ) / """preprocessor_config.json""" json.dump( {"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(_UpperCAmelCase , """w""" ) , ) lowercase__ = AutoImageProcessor.from_pretrained(_UpperCAmelCase ) self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase ) def lowerCamelCase__ (self : Optional[Any] ) -> Dict: """simple docstring""" with self.assertRaisesRegex( _UpperCAmelCase , """clip-base is not a local folder and is not a 
valid model identifier""" ): lowercase__ = AutoImageProcessor.from_pretrained("""clip-base""" ) def lowerCamelCase__ (self : Tuple ) -> Tuple: """simple docstring""" with self.assertRaisesRegex( _UpperCAmelCase , r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ): lowercase__ = AutoImageProcessor.from_pretrained(_UpperCAmelCase , revision="""aaaaaa""" ) def lowerCamelCase__ (self : List[Any] ) -> Dict: """simple docstring""" with self.assertRaisesRegex( _UpperCAmelCase , """hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" , ): lowercase__ = AutoImageProcessor.from_pretrained("""hf-internal-testing/config-no-model""" ) def lowerCamelCase__ (self : int ) -> Optional[int]: """simple docstring""" with self.assertRaises(_UpperCAmelCase ): lowercase__ = AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" ) # If remote code is disabled, we can't load this config. with self.assertRaises(_UpperCAmelCase ): lowercase__ = AutoImageProcessor.from_pretrained( """hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=_UpperCAmelCase ) lowercase__ = AutoImageProcessor.from_pretrained( """hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=_UpperCAmelCase ) self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" ) # Test image processor can be reloaded. 
with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained(_UpperCAmelCase ) lowercase__ = AutoImageProcessor.from_pretrained(_UpperCAmelCase , trust_remote_code=_UpperCAmelCase ) self.assertEqual(reloaded_image_processor.__class__.__name__ , """NewImageProcessor""" ) def lowerCamelCase__ (self : List[str] ) -> Dict: """simple docstring""" try: AutoConfig.register("""custom""" , _UpperCAmelCase ) AutoImageProcessor.register(_UpperCAmelCase , _UpperCAmelCase ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(_UpperCAmelCase ): AutoImageProcessor.register(_UpperCAmelCase , _UpperCAmelCase ) with tempfile.TemporaryDirectory() as tmpdirname: lowercase__ = Path(_UpperCAmelCase ) / """preprocessor_config.json""" lowercase__ = Path(_UpperCAmelCase ) / """config.json""" json.dump( {"""feature_extractor_type""": """CLIPFeatureExtractor""", """processor_class""": """CLIPProcessor"""} , open(_UpperCAmelCase , """w""" ) , ) json.dump({"""model_type""": """clip"""} , open(_UpperCAmelCase , """w""" ) ) lowercase__ = CustomImageProcessor.from_pretrained(_UpperCAmelCase ) # Now that the config is registered, it can be used as any other config with the auto-API with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained(_UpperCAmelCase ) lowercase__ = AutoImageProcessor.from_pretrained(_UpperCAmelCase ) self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content: del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig] def lowerCamelCase__ (self : int ) -> List[str]: """simple docstring""" class A ( UpperCAmelCase__ ): '''simple docstring''' A__ = True try: AutoConfig.register("""custom""" , _UpperCAmelCase ) AutoImageProcessor.register(_UpperCAmelCase , _UpperCAmelCase ) # If remote code is not set, the default is to use local 
lowercase__ = AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" ) self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" ) self.assertTrue(image_processor.is_local ) # If remote code is disabled, we load the local one. lowercase__ = AutoImageProcessor.from_pretrained( """hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=_UpperCAmelCase ) self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" ) self.assertTrue(image_processor.is_local ) # If remote is enabled, we load from the Hub lowercase__ = AutoImageProcessor.from_pretrained( """hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=_UpperCAmelCase ) self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" ) self.assertTrue(not hasattr(_UpperCAmelCase , """is_local""" ) ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content: del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
305
def longest_distance(graph):
    """Compute the number of vertices on the longest path in a DAG.

    Runs Kahn's algorithm (BFS topological order) over an adjacency-list
    ``graph`` mapping vertex -> list of successors, relaxing ``long_dist``
    so each vertex holds the longest vertex-count of any path ending at it.

    Prints and returns the maximum path length (the return value is new and
    backward compatible; the original only printed).
    """
    indegree = [0] * len(graph)
    queue = []
    # Every vertex is, on its own, a path of length 1.
    long_dist = [1] * len(graph)

    # Count incoming edges per vertex.
    for successors in graph.values():
        for node in successors:
            indegree[node] += 1

    # Seed the queue with all sources (no incoming edges).
    for node in range(len(graph)):
        if indegree[node] == 0:
            queue.append(node)

    # Process in topological order, relaxing path lengths.
    while queue:
        vertex = queue.pop(0)
        for successor in graph[vertex]:
            indegree[successor] -= 1
            if long_dist[vertex] + 1 > long_dist[successor]:
                long_dist[successor] = long_dist[vertex] + 1
            if indegree[successor] == 0:
                queue.append(successor)

    result = max(long_dist)
    print(result)
    return result


# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
305
1
from dataclasses import dataclass, field
from typing import Tuple

from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments


if is_torch_available():
    import torch

if is_torch_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm


logger = logging.get_logger(__name__)


@dataclass
class PyTorchBenchmarkArguments(BenchmarkArguments):
    """Benchmark arguments specific to the PyTorch backend.

    Extends the framework-agnostic ``BenchmarkArguments`` with torchscript,
    TPU-metric and Apex AMP options, and translates legacy negated CLI flags
    (``no_cuda`` etc.) into their positive counterparts.
    """

    # Legacy negated flags still accepted for backward compatibility;
    # each maps to the positive attribute obtained by stripping "no_".
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        """Consume deprecated ``no_*`` kwargs, then defer to the base dataclass init.

        Each deprecated flag sets the logically-negated positive attribute and
        emits a deprecation warning before the remaining kwargs are forwarded.
        """
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                # "no_cuda" -> "cuda"; the positive flag is the negation of the old one.
                positive_arg = deprecated_arg[3:]
                setattr(self, positive_arg, not kwargs.pop(deprecated_arg))
                logger.warning(
                    f"{deprecated_arg} is depreciated. Please use --no_{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )

        self.torchscript = kwargs.pop("torchscript", self.torchscript)
        self.torch_xla_tpu_print_metrics = kwargs.pop("torch_xla_tpu_print_metrics", self.torch_xla_tpu_print_metrics)
        self.fp16_opt_level = kwargs.pop("fp16_opt_level", self.fp16_opt_level)
        super().__init__(**kwargs)

    torchscript: bool = field(default=False, metadata={"help": "Trace the models using torchscript"})
    torch_xla_tpu_print_metrics: bool = field(default=False, metadata={"help": "Print Xla/PyTorch tpu metrics"})
    fp16_opt_level: str = field(
        default="O1",
        metadata={
            "help": (
                "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
                "See details at https://nvidia.github.io/apex/amp.html"
            )
        },
    )

    @cached_property
    def _setup_devices(self) -> Tuple["torch.device", int]:
        """Resolve the torch device and visible GPU count once; result is cached."""
        requires_backends(self, ["torch"])
        logger.info("PyTorch: setting up devices")
        if not self.cuda:
            device = torch.device("cpu")
            n_gpu = 0
        elif is_torch_tpu_available():
            device = xm.xla_device()
            n_gpu = 0
        else:
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
            n_gpu = torch.cuda.device_count()
        return device, n_gpu

    @property
    def is_tpu(self):
        # True only when a TPU runtime is importable AND the user asked for it.
        return is_torch_tpu_available() and self.tpu

    @property
    def device_idx(self) -> int:
        requires_backends(self, ["torch"])
        # TODO(PVP): currently only single GPU is supported
        return torch.cuda.current_device()

    @property
    def device(self) -> "torch.device":
        requires_backends(self, ["torch"])
        return self._setup_devices[0]

    @property
    def n_gpu(self):
        requires_backends(self, ["torch"])
        return self._setup_devices[1]

    @property
    def is_gpu(self):
        return self.n_gpu > 0
352
def _modexpt(base: int, exponent: int, modulo_value: int) -> int:
    """Return ``base ** exponent % modulo_value`` by recursive squaring.

    Requires ``exponent >= 1``; the base case returns ``base`` unreduced,
    which is fine for the call pattern used by :func:`solution`.
    """
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        # Even exponent: square the half-power.
        half = _modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (half * half) % modulo_value
    # Odd exponent: peel one factor of base off.
    return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value


def solution(base: int = 1777, height: int = 1855, digits: int = 8) -> int:
    """Project Euler 188: last ``digits`` digits of the hyperexponentiation base↑↑height.

    Iterates the tower from the top down, reducing modulo ``10 ** digits``
    at every step so intermediate values stay small.
    """
    result = base
    for _ in range(1, height):
        result = _modexpt(base, result, 10**digits)
    return result


if __name__ == "__main__":
    print(f"{solution() = }")
269
0
"""Modular multiplicative inverse via the extended Euclidean algorithm."""


def gcd(a: int, b: int) -> int:
    """Return the greatest common divisor of ``a`` and ``b`` (Euclid's algorithm)."""
    while a != 0:
        a, b = b % a, a
    return b


def mod_inverse(a: int, m: int) -> int:
    """Return ``x`` such that ``(a * x) % m == 1``.

    Raises:
        ValueError: if ``a`` and ``m`` are not coprime, in which case no
            inverse exists.
    """
    if gcd(a, m) != 1:
        msg = f"mod inverse of {a!r} and {m!r} does not exist"
        raise ValueError(msg)
    # Extended Euclid: maintain two rows (u1,u2,u3) and (v1,v2,v3) with the
    # invariant  u1*a + u2*m == u3  and  v1*a + v2*m == v3.
    ua, ub, uc = 1, 0, a
    va, vb, vc = 0, 1, m
    while vc != 0:
        q = uc // vc
        ua, ub, uc, va, vb, vc = (
            va,
            vb,
            vc,
            (ua - q * va),
            (ub - q * vb),
            (uc - q * vc),
        )
    # uc is now gcd(a, m) == 1, so ua is the Bezout coefficient of a.
    return ua % m
44
"""Public exports for the Versatile Diffusion pipelines.

Falls back to dummy placeholder objects when torch and a recent enough
`transformers` are not installed, so importing this module never fails.
"""
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


try:
    # Versatile Diffusion requires torch plus transformers >= 4.25.0.
    if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Dependencies missing: export dummies that raise a helpful error on use.
    from ...utils.dummy_torch_and_transformers_objects import (
        VersatileDiffusionDualGuidedPipeline,
        VersatileDiffusionImageVariationPipeline,
        VersatileDiffusionPipeline,
        VersatileDiffusionTextToImagePipeline,
    )
else:
    # Dependencies present: export the real implementations.
    from .modeling_text_unet import UNetFlatConditionModel
    from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
    from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
    from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
    from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
53
0
"""VisionEncoderDecoder composite model configuration and ONNX export configs."""

import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig


if TYPE_CHECKING:
    from ... import PreTrainedTokenizerBase, TensorType

logger = logging.get_logger(__name__)


class VisionEncoderDecoderConfig(PretrainedConfig):
    """Configuration that wraps an image-encoder config and a text-decoder config.

    Both sub-configurations must be supplied (as dicts containing a
    ``model_type`` key) via the ``encoder`` and ``decoder`` kwargs; each is
    materialized through ``AutoConfig.for_model``.
    """

    model_type = "vision-encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                f"A configuraton of type {self.model_type} cannot be instantiated because "
                f"not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}"
            )

        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        """Build a composite config from two existing configs.

        The decoder is forced into decoder mode with cross-attention enabled.
        """
        logger.info("Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize to a plain dict, recursing into both sub-configs."""
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class VisionEncoderDecoderEncoderOnnxConfig(OnnxConfig):
    """ONNX export config for the vision encoder half (pixel inputs only)."""

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict({"last_hidden_state": {0: "batch", 1: "encoder_sequence"}})


class VisionEncoderDecoderDecoderOnnxConfig(OnnxConfig):
    """ONNX export config for the text decoder half (token + encoder-state inputs)."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict()
        common_inputs["input_ids"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["encoder_hidden_states"] = {0: "batch", 1: "encoder_sequence"}
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        """Extend the base dummy inputs with zeroed encoder hidden states."""
        import torch

        common_inputs = OrderedDict()
        dummy_input = super().generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        batch, encoder_sequence = dummy_input["input_ids"].shape
        # Hidden-state shape follows the encoder's hidden size stashed on the config.
        encoder_hidden_states_shape = (batch, encoder_sequence, self._config.encoder_hidden_size)
        common_inputs["input_ids"] = dummy_input.pop("input_ids")
        common_inputs["attention_mask"] = dummy_input.pop("attention_mask")
        common_inputs["encoder_hidden_states"] = torch.zeros(encoder_hidden_states_shape)

        return common_inputs


class VisionEncoderDecoderOnnxConfig(OnnxConfig):
    """Dispatcher that yields the encoder- or decoder-specific ONNX configs."""

    @property
    def inputs(self) -> None:
        # The composite model itself is never exported directly.
        pass

    def get_encoder_config(self, encoder_config: PretrainedConfig) -> OnnxConfig:
        """Return the ONNX config for exporting the encoder."""
        return VisionEncoderDecoderEncoderOnnxConfig(encoder_config)

    def get_decoder_config(
        self, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, feature: str = "default"
    ) -> OnnxConfig:
        """Return the ONNX config for exporting the decoder.

        The encoder's hidden size is copied onto the decoder config so dummy
        encoder hidden states can be shaped correctly.
        """
        decoder_config.encoder_hidden_size = encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(decoder_config, feature)
356
import torch


def main() -> int:
    """Report how many CUDA GPUs torch can see.

    Prints a one-line summary and returns the GPU count (0 when CUDA is
    unavailable) so callers and tests can inspect the result.
    """
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
    else:
        num_gpus = 0
    print(f"Successfully ran on {num_gpus} GPUs")
    return num_gpus


if __name__ == "__main__":
    main()
305
0
import argparse import json import logging import os import shutil import sys import tempfile import unittest from unittest import mock import torch from accelerate.utils import write_basic_config from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device from transformers.utils import is_apex_available logging.basicConfig(level=logging.DEBUG) lowercase__ :Optional[int] = logging.getLogger() def UpperCamelCase ( ): '''simple docstring''' lowercase = argparse.ArgumentParser() parser.add_argument('''-f''' ) lowercase = parser.parse_args() return args.f def UpperCamelCase ( lowerCAmelCase__ ): '''simple docstring''' lowercase = {} lowercase = os.path.join(lowerCAmelCase__ , '''all_results.json''' ) if os.path.exists(lowerCAmelCase__ ): with open(lowerCAmelCase__ , '''r''' ) as f: lowercase = json.load(lowerCAmelCase__ ) else: raise ValueError(f'can\'t find {path}' ) return results def UpperCamelCase ( ): '''simple docstring''' lowercase = torch.cuda.is_available() and torch_device == '''cuda''' return is_using_cuda and is_apex_available() lowercase__ :int = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class lowercase ( SCREAMING_SNAKE_CASE__ ): @classmethod def A__ ( cls): # Write Accelerate config, will pick up on CPU, GPU, and multi-GPU lowercase = tempfile.mkdtemp() lowercase = os.path.join(cls.tmpdir ,'''default_config.yml''') write_basic_config(save_location=cls.configPath) lowercase = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath] @classmethod def A__ ( cls): shutil.rmtree(cls.tmpdir) @mock.patch.dict(os.environ ,{'''WANDB_MODE''': '''offline'''}) def A__ ( self): lowercase = self.get_auto_remove_tmp_dir() lowercase = f'\n {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file 
./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --seed=42\n --checkpointing_steps epoch\n --with_tracking\n '.split() if is_cuda_and_apex_available(): testargs.append('''--fp16''') run_command(self._launch_args + testargs) lowercase = get_results(A__) self.assertGreaterEqual(result['''eval_accuracy'''] ,0.75) self.assertTrue(os.path.exists(os.path.join(A__ ,'''epoch_0'''))) self.assertTrue(os.path.exists(os.path.join(A__ ,'''glue_no_trainer'''))) @mock.patch.dict(os.environ ,{'''WANDB_MODE''': '''offline'''}) def A__ ( self): lowercase = self.get_auto_remove_tmp_dir() lowercase = f'\n {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --block_size 128\n --per_device_train_batch_size 5\n --per_device_eval_batch_size 5\n --num_train_epochs 2\n --output_dir {tmp_dir}\n --checkpointing_steps epoch\n --with_tracking\n '.split() if torch.cuda.device_count() > 1: # Skipping because there are not enough batches to train the model + would need a drop_last to work. 
return run_command(self._launch_args + testargs) lowercase = get_results(A__) self.assertLess(result['''perplexity'''] ,1_0_0) self.assertTrue(os.path.exists(os.path.join(A__ ,'''epoch_0'''))) self.assertTrue(os.path.exists(os.path.join(A__ ,'''clm_no_trainer'''))) @mock.patch.dict(os.environ ,{'''WANDB_MODE''': '''offline'''}) def A__ ( self): lowercase = self.get_auto_remove_tmp_dir() lowercase = f'\n {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --num_train_epochs=1\n --checkpointing_steps epoch\n --with_tracking\n '.split() run_command(self._launch_args + testargs) lowercase = get_results(A__) self.assertLess(result['''perplexity'''] ,4_2) self.assertTrue(os.path.exists(os.path.join(A__ ,'''epoch_0'''))) self.assertTrue(os.path.exists(os.path.join(A__ ,'''mlm_no_trainer'''))) @mock.patch.dict(os.environ ,{'''WANDB_MODE''': '''offline'''}) def A__ ( self): # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu lowercase = 7 if get_gpu_count() > 1 else 2 lowercase = self.get_auto_remove_tmp_dir() lowercase = f'\n {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n --checkpointing_steps epoch\n --with_tracking\n '.split() run_command(self._launch_args + testargs) lowercase = get_results(A__) self.assertGreaterEqual(result['''eval_accuracy'''] ,0.75) self.assertLess(result['''train_loss'''] ,0.5) self.assertTrue(os.path.exists(os.path.join(A__ ,'''epoch_0'''))) 
self.assertTrue(os.path.exists(os.path.join(A__ ,'''ner_no_trainer'''))) @unittest.skip(reason='''Fix me @muellerzr''') @mock.patch.dict(os.environ ,{'''WANDB_MODE''': '''offline'''}) def A__ ( self): lowercase = self.get_auto_remove_tmp_dir() lowercase = f'\n {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --seed=42\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n '.split() run_command(self._launch_args + testargs) lowercase = get_results(A__) # Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics. self.assertGreaterEqual(result['''eval_f1'''] ,2_8) self.assertGreaterEqual(result['''eval_exact'''] ,2_8) self.assertTrue(os.path.exists(os.path.join(A__ ,'''epoch_0'''))) self.assertTrue(os.path.exists(os.path.join(A__ ,'''qa_no_trainer'''))) @mock.patch.dict(os.environ ,{'''WANDB_MODE''': '''offline'''}) def A__ ( self): lowercase = self.get_auto_remove_tmp_dir() lowercase = f'\n {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/swag/sample.json\n --validation_file tests/fixtures/tests_samples/swag/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=20\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --with_tracking\n '.split() run_command(self._launch_args + testargs) lowercase = get_results(A__) self.assertGreaterEqual(result['''eval_accuracy'''] ,0.8) self.assertTrue(os.path.exists(os.path.join(A__ ,'''swag_no_trainer'''))) @slow @mock.patch.dict(os.environ ,{'''WANDB_MODE''': 
'''offline'''}) def A__ ( self): lowercase = self.get_auto_remove_tmp_dir() lowercase = f'\n {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n '.split() run_command(self._launch_args + testargs) lowercase = get_results(A__) self.assertGreaterEqual(result['''eval_rouge1'''] ,1_0) self.assertGreaterEqual(result['''eval_rouge2'''] ,2) self.assertGreaterEqual(result['''eval_rougeL'''] ,7) self.assertGreaterEqual(result['''eval_rougeLsum'''] ,7) self.assertTrue(os.path.exists(os.path.join(A__ ,'''epoch_0'''))) self.assertTrue(os.path.exists(os.path.join(A__ ,'''summarization_no_trainer'''))) @slow @mock.patch.dict(os.environ ,{'''WANDB_MODE''': '''offline'''}) def A__ ( self): lowercase = self.get_auto_remove_tmp_dir() lowercase = f'\n {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py\n --model_name_or_path sshleifer/student_marian_en_ro_6_1\n --source_lang en\n --target_lang ro\n --train_file tests/fixtures/tests_samples/wmt16/sample.json\n --validation_file tests/fixtures/tests_samples/wmt16/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --num_beams=6\n --learning_rate=3e-3\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --source_lang en_XX\n --target_lang ro_RO\n --checkpointing_steps epoch\n --with_tracking\n '.split() run_command(self._launch_args + testargs) lowercase = get_results(A__) self.assertGreaterEqual(result['''eval_bleu'''] ,3_0) self.assertTrue(os.path.exists(os.path.join(A__ ,'''epoch_0'''))) self.assertTrue(os.path.exists(os.path.join(A__ ,'''translation_no_trainer'''))) @slow def A__ ( 
self): lowercase = logging.StreamHandler(sys.stdout) logger.addHandler(A__) lowercase = self.get_auto_remove_tmp_dir() lowercase = f'\n {self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py\n --dataset_name huggingface/semantic-segmentation-test-sample\n --output_dir {tmp_dir}\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n '.split() run_command(self._launch_args + testargs) lowercase = get_results(A__) self.assertGreaterEqual(result['''eval_overall_accuracy'''] ,0.10) @mock.patch.dict(os.environ ,{'''WANDB_MODE''': '''offline'''}) def A__ ( self): lowercase = self.get_auto_remove_tmp_dir() lowercase = f'\n {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py\n --model_name_or_path google/vit-base-patch16-224-in21k\n --dataset_name hf-internal-testing/cats_vs_dogs_sample\n --learning_rate 1e-4\n --per_device_train_batch_size 2\n --per_device_eval_batch_size 1\n --max_train_steps 2\n --train_val_split 0.1\n --seed 42\n --output_dir {tmp_dir}\n --with_tracking\n --checkpointing_steps 1\n '.split() if is_cuda_and_apex_available(): testargs.append('''--fp16''') run_command(self._launch_args + testargs) lowercase = get_results(A__) # The base model scores a 25% self.assertGreaterEqual(result['''eval_accuracy'''] ,0.6) self.assertTrue(os.path.exists(os.path.join(A__ ,'''step_1'''))) self.assertTrue(os.path.exists(os.path.join(A__ ,'''image_classification_no_trainer''')))
101
"""Convert BiT checkpoints from the timm library to the HuggingFace format."""
import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform

from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_config(model_name):
    """Build a `BitConfig` for *model_name*, wired with the ImageNet-1k label mappings."""
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    conv_layer = "std_conv" if "bit" in model_name else False

    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    config = BitConfig(
        conv_layer=conv_layer,
        num_labels=1000,
        id2label=id2label,
        label2id=label2id,
    )
    return config


def rename_key(name):
    """Map a timm state-dict key to the corresponding HF BiT key."""
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "head.fc" in name:
        name = name.replace("head.fc", "classifier.1")
    if name.startswith("norm"):
        name = "bit." + name
    if "bit" not in name and "classifier" not in name:
        name = "bit.encoder." + name
    return name


def prepare_img():
    """Download the standard COCO cats image used to verify conversions."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_bit_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    """
    Copy/paste/tweak the timm model's weights into our BiT structure, verify the
    outputs against timm, and optionally save/push the converted model.
    """
    config = get_config(model_name)

    # load original model from timm
    timm_model = create_model(model_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remapping keys to HF names
    state_dict = timm_model.state_dict()
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val.squeeze() if "head" in key else val

    # load HuggingFace model
    model = BitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # create image processor that mirrors timm's eval transform
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print("Logits:", logits[0, :3])
    print("Predicted class:", model.config.id2label[logits.argmax(-1).item()])
    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model {model_name} and processor to the hub")
        model.push_to_hub(f"ybelkada/{model_name}")
        processor.push_to_hub(f"ybelkada/{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="resnetv2_50x1_bitm",
        type=str,
        help="Name of the BiT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model to the hub.",
    )
    args = parser.parse_args()
    convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
101
1
"""Lazy import structure for the DeiT model family."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {"configuration_deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig", "DeiTOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_deit"] = ["DeiTFeatureExtractor"]
    _import_structure["image_processing_deit"] = ["DeiTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_deit"] = [
        "DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DeiTForImageClassification",
        "DeiTForImageClassificationWithTeacher",
        "DeiTForMaskedImageModeling",
        "DeiTModel",
        "DeiTPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_deit"] = [
        "TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDeiTForImageClassification",
        "TFDeiTForImageClassificationWithTeacher",
        "TFDeiTForMaskedImageModeling",
        "TFDeiTModel",
        "TFDeiTPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_deit import DeiTFeatureExtractor
        from .image_processing_deit import DeiTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_deit import (
            DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DeiTForImageClassification,
            DeiTForImageClassificationWithTeacher,
            DeiTForMaskedImageModeling,
            DeiTModel,
            DeiTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_deit import (
            TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDeiTForImageClassification,
            TFDeiTForImageClassificationWithTeacher,
            TFDeiTForMaskedImageModeling,
            TFDeiTModel,
            TFDeiTPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy frameworks load on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
229
"""Testing suite for the PyTorch UperNet model."""
import inspect
import unittest

from huggingface_hub import hf_hub_download

from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import UperNetForSemanticSegmentation
    from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class UperNetModelTester:
    """Builds tiny configs and random inputs for fast UperNet model tests."""

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        type_sequence_label_size=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.num_labels = num_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels) with random tensors."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_backbone_config(self):
        """Tiny ConvNext backbone used by the UperNet head in tests."""
        return ConvNextConfig(
            num_channels=self.num_channels,
            num_stages=self.num_stages,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            is_training=self.is_training,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            out_features=self.out_features,
        )

    def get_config(self):
        return UperNetConfig(
            backbone_config=self.get_backbone_config(),
            hidden_size=512,
            pool_scales=[1, 2, 3, 6],
            use_auxiliary_head=True,
            auxiliary_loss_weight=0.4,
            auxiliary_in_channels=40,
            auxiliary_channels=256,
            auxiliary_num_convs=1,
            auxiliary_concat_input=False,
            loss_ignore_index=255,
            num_labels=self.num_labels,
        )

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        model = UperNetForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # UperNet upsamples logits back to the input resolution.
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            pixel_values,
            labels,
        ) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class UperNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as UperNet does not use
    input_ids, inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    pipeline_model_mapping = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    has_attentions = False

    def setUp(self):
        self.model_tester = UperNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=UperNetConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @unittest.skip(reason="UperNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="UperNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="UperNet does not have a base model")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="UperNet does not have a base model")
    def test_save_load_fast_init_to_base(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        configs_no_init.backbone_config = _config_zero_init(configs_no_init.backbone_config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f'Parameter {name} of model {model_class} seems not properly initialized',
                    )

    @unittest.skip(reason="UperNet does not have tied weights")
    def test_tied_model_weights_key_ignore(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = UperNetForSemanticSegmentation.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    """Download the ADE20k fixture image used for integration tests."""
    filepath = hf_hub_download(
        repo_id="hf-internal-testing/fixtures_ade20k", repo_type="dataset", filename="ADE_val_00000001.jpg"
    )
    image = Image.open(filepath).convert("RGB")
    return image


@require_torch
@require_vision
@slow
class UperNetModelIntegrationTest(unittest.TestCase):
    def test_inference_swin_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-swin-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))

    def test_inference_convnext_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))
229
1
"""
Project Euler Problem 22: Names scores.

Sum, over all names in p022_names.txt sorted alphabetically, of
(alphabetical value of the name) * (1-based position in the sorted list).
"""
import os


def solution() -> int:
    """Return the total of all the name scores in the file."""
    # The names file lives next to this module.
    with open(os.path.dirname(__file__) + "/p022_names.txt") as file:
        names = str(file.readlines()[0])
        names = names.replace('"', "").split(",")

    names.sort()

    name_score = 0
    total_score = 0

    for i, name in enumerate(names):
        for letter in name:
            # 'A' is ord 65, so ord(letter) - 64 gives A=1 ... Z=26.
            name_score += ord(letter) - 64

        total_score += (i + 1) * name_score
        name_score = 0
    return total_score


if __name__ == "__main__":
    print(solution())
132
"""Launch-mode tests for accelerate's metric-calculation example script."""
import inspect
import os
import unittest

import torch

import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
    execute_subprocess_async,
    require_cpu,
    require_huggingface_suite,
    require_multi_gpu,
    require_single_gpu,
)
from accelerate.utils import patch_environment


@require_huggingface_suite
class MetricTester(unittest.TestCase):
    def setUp(self):
        # Resolve the on-disk path of the test_metrics script for the torchrun test.
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps", "test_metrics.py"]
        )

        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401

        self.test_metrics = test_metrics

    @require_cpu
    def test_metric_cpu_noop(self):
        debug_launcher(self.test_metrics.main, num_processes=1)

    @require_cpu
    def test_metric_cpu_multi(self):
        debug_launcher(self.test_metrics.main)

    @require_single_gpu
    def test_metric_gpu(self):
        self.test_metrics.main()

    @require_multi_gpu
    def test_metric_gpu_multi(self):
        print(f'Found {torch.cuda.device_count()} devices.')
        cmd = ["torchrun", f'--nproc_per_node={torch.cuda.device_count()}', self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
132
1
"""Tokenization class for Speech2Text."""
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union

import sentencepiece

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "spm_file": "sentencepiece.bpe.model",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/s2t-small-librispeech-asr": (
            "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json"
        ),
    },
    "spm_file": {
        "facebook/s2t-small-librispeech-asr": (
            "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model"
        )
    },
}

MAX_MODEL_INPUT_SIZES = {
    "facebook/s2t-small-librispeech-asr": 1024,
}

MUSTC_LANGS = ["pt", "fr", "ru", "nl", "ro", "it", "es", "de"]

LANGUAGES = {"mustc": MUSTC_LANGS}


class Speech2TextTokenizer(PreTrainedTokenizer):
    """
    Speech2Text tokenizer backed by a JSON vocabulary plus a SentencePiece model.
    Target-language tokens (e.g. ``<lang:fr>``) are prepended as prefix tokens.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = MAX_MODEL_INPUT_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        spm_file,
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        do_upper_case=False,
        do_lower_case=False,
        tgt_lang=None,
        lang_codes=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            do_upper_case=do_upper_case,
            do_lower_case=do_lower_case,
            tgt_lang=tgt_lang,
            lang_codes=lang_codes,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.do_upper_case = do_upper_case
        self.do_lower_case = do_lower_case

        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)

        if lang_codes is not None:
            self.lang_codes = lang_codes
            self.langs = LANGUAGES[lang_codes]
            self.lang_tokens = [f"""<lang:{lang}>""" for lang in self.langs]
            self.lang_code_to_id = {lang: self.sp_model.PieceToId(f"""<lang:{lang}>""") for lang in self.langs}

            self.additional_special_tokens = self.lang_tokens
            self._tgt_lang = tgt_lang if tgt_lang is not None else self.langs[0]

            self.set_tgt_lang_special_tokens(self._tgt_lang)
        else:
            self.lang_code_to_id = {}

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    @property
    def tgt_lang(self) -> str:
        return self._tgt_lang

    @tgt_lang.setter
    def tgt_lang(self, new_tgt_lang) -> None:
        self._tgt_lang = new_tgt_lang
        self.set_tgt_lang_special_tokens(new_tgt_lang)

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        """Reset the prefix tokens to the language code of *tgt_lang*."""
        lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [lang_code_id]

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder[self.unk_token])

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) into a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Converts a sequence of tokens (strings for sub-words) in a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                decoded = self.sp_model.decode(current_sub_tokens)
                out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        decoded = self.sp_model.decode(current_sub_tokens)
        out_string += decoded.upper() if self.do_upper_case else decoded
        return out_string.strip()

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        """Build model inputs by appending eos and prepending the language prefix token."""
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + [self.eos_token_id]

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1]
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def get_vocab(self) -> Dict:
        vocab = self.encoder.copy()
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        # The SentencePieceProcessor is not picklable; it is reloaded in __setstate__.
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        save_dir = Path(save_directory)
        assert save_dir.is_dir(), f"""{save_directory} should be a directory"""
        vocab_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        spm_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )

        save_json(self.encoder, vocab_save_path)

        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (str(vocab_save_path), str(spm_save_path))


def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    """Load a SentencePiece model from *path*."""
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path: str) -> Union[Dict, List]:
    with open(path, "r") as f:
        return json.load(f)


def save_json(data, path: str) -> None:
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
133
"""Pipeline tests for zero-shot object detection."""
import unittest

from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_tf,
    require_torch,
    require_vision,
    slow,
)

from .test_pipelines_common import ANY


if is_vision_available():
    from PIL import Image
else:

    class Image:
        """Stand-in for PIL.Image when vision deps are unavailable."""

        @staticmethod
        def open(*args, **kwargs):
            pass


@is_pipeline_test
@require_vision
@require_torch
class ZeroShotObjectDetectionPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        object_detector = pipeline(
            "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection"
        )

        examples = [
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "candidate_labels": ["cat", "remote", "couch"],
            }
        ]
        return object_detector, examples

    def run_pipeline_test(self, object_detector, examples):
        outputs = object_detector(examples[0], threshold=0.0)

        n = len(outputs)
        self.assertGreater(n, 0)
        self.assertEqual(
            outputs,
            [
                {
                    "score": ANY(float),
                    "label": ANY(str),
                    "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                }
                for i in range(n)
            ],
        )

    @require_tf
    @unittest.skip("Zero Shot Object Detection not implemented in TF")
    def test_small_model_tf(self):
        pass

    @require_torch
    def test_small_model_pt(self):
        object_detector = pipeline(
            "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection"
        )

        outputs = object_detector(
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            candidate_labels=["cat", "remote", "couch"],
            threshold=0.64,
        )

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.7235, "label": "cat", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                {"score": 0.7218, "label": "remote", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                {"score": 0.7184, "label": "couch", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                {"score": 0.6748, "label": "remote", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                {"score": 0.6656, "label": "cat", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                {"score": 0.6614, "label": "couch", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                {"score": 0.6456, "label": "remote", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
                {"score": 0.642, "label": "remote", "box": {"xmin": 67, "ymin": 274, "xmax": 93, "ymax": 297}},
                {"score": 0.6419, "label": "cat", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
            ],
        )

        outputs = object_detector(
            [
                {
                    "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                    "candidate_labels": ["cat", "remote", "couch"],
                }
            ],
            threshold=0.64,
        )

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.7235, "label": "cat", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                    {"score": 0.7218, "label": "remote", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                    {"score": 0.7184, "label": "couch", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                    {"score": 0.6748, "label": "remote", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                    {"score": 0.6656, "label": "cat", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                    {"score": 0.6614, "label": "couch", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                    {"score": 0.6456, "label": "remote", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
                    {"score": 0.642, "label": "remote", "box": {"xmin": 67, "ymin": 274, "xmax": 93, "ymax": 297}},
                    {"score": 0.6419, "label": "cat", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
                ]
            ],
        )

    @require_torch
    @slow
    def test_large_model_pt(self):
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
                {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
                {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
                {"score": 0.1474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
                {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
            ],
        )

        outputs = object_detector(
            [
                {
                    "image": "http://images.cocodataset.org/val2017/000000039769.jpg",
                    "candidate_labels": ["cat", "remote", "couch"],
                },
                {
                    "image": "http://images.cocodataset.org/val2017/000000039769.jpg",
                    "candidate_labels": ["cat", "remote", "couch"],
                },
            ],
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
                    {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
                    {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
                    {"score": 0.1474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
                    {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
                ],
                [
                    {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
                    {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
                    {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
                    {"score": 0.1474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
                    {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
                ],
            ],
        )

    @require_tf
    @unittest.skip("Zero Shot Object Detection not implemented in TF")
    def test_large_model_tf(self):
        pass

    @require_torch
    @slow
    def test_threshold(self):
        threshold = 0.2
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
            threshold=threshold,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
                {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
                {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
            ],
        )

    @require_torch
    @slow
    def test_top_k(self):
        top_k = 2
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
            top_k=top_k,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
                {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
            ],
        )
1
"""Fast (Rust-backed) tokenizer for BLOOM models.

NOTE(review): the obfuscated original had duplicate ``__init__`` parameter
names (a SyntaxError), four methods all named ``_lowercase`` (so the base-class
overrides were lost) and referenced undefined names (``_A``,
``VOCAB_FILES_NAMES``).  Identifiers below are restored from the in-file
references and the PreTrainedTokenizerFast override API.
"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple

from tokenizers import pre_tokenizers

from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging


if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation


logger = logging.get_logger(__name__)

# File name expected on disk / on the Hub for the serialized tokenizer.
VOCAB_FILES_NAMES = {"tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "tokenizer_file": {
        "bigscience/tokenizer": "https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json",
        "bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json",
        "bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json",
        "bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json",
        "bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json",
        "bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json",
        "bigscience/bloom": "https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json",
    },
}


class UpperCAmelCase_(PreTrainedTokenizerFast):
    """BLOOM tokenizer backed by a `tokenizers` ``tokenizer.json`` file.

    There is no slow (pure-Python) counterpart, hence ``slow_tokenizer_class``
    is ``None``.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = None

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        add_prefix_space=False,
        clean_up_tokenization_spaces=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            add_prefix_space=add_prefix_space,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )
        # Rebuild the backend pre-tokenizer if its serialized add_prefix_space
        # setting disagrees with the value requested here.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        """Guard: pre-tokenized input requires add_prefix_space=True."""
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs."
            )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        """Guard: pre-tokenized input requires add_prefix_space=True."""
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs."
            )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Serialize the backend tokenizer model files into *save_directory*."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """Flatten a Conversation into ids, one EOS after every turn,
        truncated on the left to ``model_max_length``."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
88
"""simple docstring""" from __future__ import annotations def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[Any]: print(f"""Vertex\tShortest Distance from vertex {src}""" ) for i, d in enumerate(_SCREAMING_SNAKE_CASE ): print(f"""{i}\t\t{d}""" ) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]: for j in range(_SCREAMING_SNAKE_CASE ): snake_case_ , snake_case_ , snake_case_ = (graph[j][k] for k in ["""src""", """dst""", """weight"""]) if distance[u] != float("""inf""" ) and distance[u] + w < distance[v]: return True return False def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> list[float]: snake_case_ = [float("""inf""" )] * vertex_count snake_case_ = 0.0 for _ in range(vertex_count - 1 ): for j in range(_SCREAMING_SNAKE_CASE ): snake_case_ , snake_case_ , snake_case_ = (graph[j][k] for k in ["""src""", """dst""", """weight"""]) if distance[u] != float("""inf""" ) and distance[u] + w < distance[v]: snake_case_ = distance[u] + w snake_case_ = check_negative_cycle(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if negative_cycle_exists: raise Exception("""Negative cycle found""" ) return distance if __name__ == "__main__": import doctest doctest.testmod() __SCREAMING_SNAKE_CASE : int = int(input('Enter number of vertices: ').strip()) __SCREAMING_SNAKE_CASE : Dict = int(input('Enter number of edges: ').strip()) __SCREAMING_SNAKE_CASE : list[dict[str, int]] = [{} for _ in range(E)] for i in range(E): print('Edge ', i + 1) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[str] = ( int(x) for x in input('Enter source, destination, weight: ').strip().split(' ') ) __SCREAMING_SNAKE_CASE : Union[str, Any] = {'src': src, 'dst': dest, 'weight': weight} __SCREAMING_SNAKE_CASE : Union[str, Any] = int(input('\nEnter shortest path source:').strip()) __SCREAMING_SNAKE_CASE : str = bellman_ford(graph, V, E, 
source) print_distance(shortest_distance, 0)
347
0
from __future__ import annotations def snake_case_(_UpperCamelCase , _UpperCamelCase ) -> bool: """simple docstring""" if len(a__ ) == 0: return False _snake_case = len(a__ ) // 2 if a_list[midpoint] == item: return True if item < a_list[midpoint]: return binary_search(a_list[:midpoint] , a__ ) else: return binary_search(a_list[midpoint + 1 :] , a__ ) if __name__ == "__main__": __A = input('''Enter numbers separated by comma:\n''').strip() __A = [int(item.strip()) for item in user_input.split(''',''')] __A = int(input('''Enter the number to be found in the list:\n''').strip()) __A = '' if binary_search(sequence, target) else 'not ' print(f'''{target} was {not_str}found in {sequence}''')
361
"""Second-order (biquad) IIR filter factories.

Coefficient formulas follow the Audio EQ Cookbook (R. Bristow-Johnson).
Each factory converts a centre/cutoff ``frequency`` (Hz), a ``samplerate``
(Hz) and a quality factor ``q_factor`` into a configured 2nd-order
:class:`IIRFilter`; the peak/shelf variants additionally take ``gain_db``.

NOTE(review): the obfuscated original defined all seven factories under one
name and collapsed every local (and parameter) into a single identifier —
a SyntaxError.  Names below are reconstructed from the visible formulas.
"""
from math import cos, sin, sqrt, tau

from audio_filters.iir_filter import IIRFilter


def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a low-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a high-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a band-pass biquad filter (constant skirt gain)."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create an all-pass biquad filter (flat magnitude, phase shift only)."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2)
    # For an all-pass the feed-forward taps are the feedback taps reversed.
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt


def make_peak(
    frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)
) -> IIRFilter:
    """Create a peaking-EQ biquad filter boosting/cutting by *gain_db*."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_lowshelf(
    frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)
) -> IIRFilter:
    """Create a low-shelf biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    # Cookbook sub-expressions: (A±1) ∓/± (A∓1)·cos(w0)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aaa = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aaa)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aaa)
    a0 = ppmc + aaa
    a1 = -2 * pmpc
    a2 = ppmc - aaa

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_highshelf(
    frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)
) -> IIRFilter:
    """Create a high-shelf biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aaa = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aaa)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aaa)
    a0 = pmc + aaa
    a1 = 2 * mpc
    a2 = pmc - aaa

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
278
0
"""simple docstring""" from math import isqrt, loga def _A (__a ) -> list[int]: """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[Any] = [True] * max_number for i in range(2 , isqrt(max_number - 1 ) + 1 ): if is_prime[i]: for j in range(i**2 , __a , __a ): SCREAMING_SNAKE_CASE_ : Union[str, Any] = False return [i for i in range(2 , __a ) if is_prime[i]] def _A (__a = 80_08_00 , __a = 80_08_00 ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[Any] = degree * loga(__a ) SCREAMING_SNAKE_CASE_ : str = int(__a ) SCREAMING_SNAKE_CASE_ : Optional[int] = calculate_prime_numbers(__a ) SCREAMING_SNAKE_CASE_ : int = 0 SCREAMING_SNAKE_CASE_ : List[Any] = 0 SCREAMING_SNAKE_CASE_ : List[Any] = len(__a ) - 1 while left < right: while ( prime_numbers[right] * loga(prime_numbers[left] ) + prime_numbers[left] * loga(prime_numbers[right] ) > upper_bound ): right -= 1 hybrid_integers_count += right - left left += 1 return hybrid_integers_count if __name__ == "__main__": print(f'''{solution() = }''')
91
"""Tests for the unconditional Latent Diffusion (LDM) pipeline.

NOTE(review): in the obfuscated original both test classes shared one name
(the second shadowed the first), the helper properties were all named
``__lowercase`` although the tests reference ``self.dummy_uncond_unet`` /
``self.dummy_vq_model``, and a single local ``_a`` was reused for every
value (so e.g. the VQ model was passed as the UNet).  Identifiers are
restored from those in-file references and the public diffusers API
(``UNet2DModel``; the original import ``UNetaDModel`` does not exist).
"""
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel

from diffusers import DDIMScheduler, LDMPipeline, UNet2DModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device


enable_full_determinism()


class LDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        """Tiny deterministic UNet for fast CPU tests."""
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    @property
    def dummy_vq_model(self):
        """Tiny deterministic VQ autoencoder."""
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=3,
        )
        return model

    @property
    def dummy_text_encoder(self):
        """Tiny CLIP text encoder (kept for parity with related pipelines)."""
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)

    def test_inference_uncond(self):
        unet = self.dummy_uncond_unet
        scheduler = DDIMScheduler()
        vqvae = self.dummy_vq_model

        ldm = LDMPipeline(unet=unet, vqvae=vqvae, scheduler=scheduler)
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=2, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = ldm(
            generator=generator, num_inference_steps=2, output_type="numpy", return_dict=False
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172]
        )
        # MPS kernels are slightly less precise than CPU/CUDA.
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < tolerance


@slow
@require_torch
class LDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_uncond(self):
        ldm = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=5, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array(
            [0.4399, 0.44975, 0.46825, 0.474, 0.4359, 0.4581, 0.45095, 0.4341, 0.4447]
        )
        tolerance = 1e-2 if torch_device != "mps" else 3e-2
        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
271
0
'''simple docstring''' import re def UpperCAmelCase ( lowerCamelCase_ :str ): '''simple docstring''' snake_case_ : List[Any] = re.compile( R"""^(?:0|94|\+94|0{2}94)""" R"""7(0|1|2|4|5|6|7|8)""" R"""(-| |)""" R"""\d{7}$""" ) return bool(re.search(lowerCamelCase_ , lowerCamelCase_ ) ) if __name__ == "__main__": __A : int = '0094702343221' print(is_sri_lankan_phone_number(phone))
8
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging __A : List[Any] = logging.get_logger(__name__) __A : str = { 'google/canine-s': 'https://huggingface.co/google/canine-s/resolve/main/config.json', # See all CANINE models at https://huggingface.co/models?filter=canine } class __UpperCamelCase ( lowercase__ ): lowercase : List[Any] = 'canine' def __init__( self :Optional[int] ,_UpperCamelCase :Dict=7_6_8 ,_UpperCamelCase :Union[str, Any]=1_2 ,_UpperCamelCase :int=1_2 ,_UpperCamelCase :int=3_0_7_2 ,_UpperCamelCase :int="gelu" ,_UpperCamelCase :Any=0.1 ,_UpperCamelCase :int=0.1 ,_UpperCamelCase :Any=1_6_3_8_4 ,_UpperCamelCase :Tuple=1_6 ,_UpperCamelCase :List[str]=0.02 ,_UpperCamelCase :Any=1E-1_2 ,_UpperCamelCase :Tuple=0 ,_UpperCamelCase :List[str]=0xE_0_0_0 ,_UpperCamelCase :Optional[Any]=0xE_0_0_1 ,_UpperCamelCase :str=4 ,_UpperCamelCase :Optional[int]=4 ,_UpperCamelCase :str=8 ,_UpperCamelCase :int=1_6_3_8_4 ,_UpperCamelCase :int=1_2_8 ,**_UpperCamelCase :str ,): super().__init__(pad_token_id=_UpperCamelCase ,bos_token_id=_UpperCamelCase ,eos_token_id=_UpperCamelCase ,**_UpperCamelCase ) snake_case_ : List[str] = max_position_embeddings snake_case_ : Union[str, Any] = hidden_size snake_case_ : Dict = num_hidden_layers snake_case_ : Optional[int] = num_attention_heads snake_case_ : Tuple = intermediate_size snake_case_ : str = hidden_act snake_case_ : Union[str, Any] = hidden_dropout_prob snake_case_ : Dict = attention_probs_dropout_prob snake_case_ : Optional[Any] = initializer_range snake_case_ : Optional[int] = type_vocab_size snake_case_ : List[str] = layer_norm_eps # Character config: snake_case_ : Any = downsampling_rate snake_case_ : List[str] = upsampling_kernel_size snake_case_ : int = num_hash_functions snake_case_ : Tuple = num_hash_buckets snake_case_ : Tuple = local_transformer_stride
8
1
def A_ ( snake_case : int , snake_case : int ) -> int: '''simple docstring''' return abs(snake_case ) if a == 0 else greatest_common_divisor(b % a , snake_case ) def A_ ( snake_case : int , snake_case : int ) -> int: '''simple docstring''' while y: # --> when y=0 then loop will terminate and return x as final GCD. __UpperCamelCase , __UpperCamelCase = y, x % y return abs(snake_case ) def A_ ( ) -> List[str]: '''simple docstring''' try: __UpperCamelCase = input('''Enter two integers separated by comma (,): ''' ).split(''',''' ) __UpperCamelCase = int(nums[0] ) __UpperCamelCase = int(nums[1] ) print( f"greatest_common_divisor({num_a}, {num_a}) = " f"{greatest_common_divisor(snake_case , snake_case )}" ) print(f"By iterative gcd({num_a}, {num_a}) = {gcd_by_iterative(snake_case , snake_case )}" ) except (IndexError, UnboundLocalError, ValueError): print('''Wrong input''' ) if __name__ == "__main__": main()
328
"""Whisper model configuration and its ONNX export configuration.

NOTE(review): the obfuscated original gave every ``__init__`` parameter the
same name (a SyntaxError), assigned attributes to a throwaway local, and
defined both classes under one name.  Identifiers are restored from the
visible defaults/assignments and the public transformers API (the mangled
import ``OnnxSeqaSeqConfigWithPast`` -> ``OnnxSeq2SeqConfigWithPast``).
"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeq2SeqConfigWithPast
from ...utils import logging


if TYPE_CHECKING:
    from ...feature_extraction_utils import FeatureExtractionMixin
    from ...tokenization_utils_base import PreTrainedTokenizerBase
    from ...utils import TensorType

logger = logging.get_logger(__name__)

WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}

# Token ids suppressed during generation for the English-only checkpoints.
# fmt: off
NON_SPEECH_TOKENS = [
    1, 2, 7, 8, 9, 10, 14, 25,
    26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
    63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
    705, 796, 930, 1058, 1220, 1267, 1279, 1303, 1343, 1377,
    1391, 1635, 1782, 1875, 2162, 2361, 2488, 3467, 4008, 4211,
    4600, 4808, 5299, 5855, 6329, 7203, 9609, 9959, 10563, 10786,
    11420, 11709, 11907, 13163, 13697, 13700, 14808, 15306, 16410, 16791,
    17992, 19203, 19510, 20724, 22305, 22935, 27007, 30109, 30420, 33409,
    34949, 40283, 40493, 40549, 47282, 49146, 50257, 50359, 50360, 50361
]
# Token ids suppressed during generation for the multilingual checkpoints.
NON_SPEECH_TOKENS_MULTI = [
    1, 2, 7, 8, 9, 10, 14, 25,
    26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
    63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
    893, 902, 918, 922, 931, 1350, 1853, 1982, 2460, 2627,
    3246, 3253, 3268, 3536, 3846, 3961, 4183, 4667, 6585, 6647,
    7273, 9061, 9383, 10428, 10929, 11938, 12033, 12331, 12562, 13793,
    14157, 14635, 15265, 15618, 16553, 16604, 18362, 18956, 20075, 21675,
    22520, 26130, 26161, 26435, 28279, 29464, 31650, 32302, 32470, 36865,
    42863, 47425, 49870, 50254, 50258, 50360, 50361, 50362
]
# fmt: on


class WhisperConfig(PretrainedConfig):
    """Configuration for a Whisper encoder-decoder speech-recognition model."""

    model_type = "whisper"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=51865,
        num_mel_bins=80,
        encoder_layers=6,
        encoder_attention_heads=4,
        decoder_layers=6,
        decoder_attention_heads=4,
        decoder_ffn_dim=1536,
        encoder_ffn_dim=1536,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        decoder_start_token_id=50257,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=256,
        dropout=0.0,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        scale_embedding=False,
        max_source_positions=1500,
        max_target_positions=448,
        pad_token_id=50256,
        bos_token_id=50256,
        eos_token_id=50256,
        suppress_tokens=None,
        begin_suppress_tokens=[220, 50256],
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        apply_spec_augment=False,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        median_filter_width=7,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions

        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        self.median_filter_width = median_filter_width

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            suppress_tokens=suppress_tokens,
            begin_suppress_tokens=begin_suppress_tokens,
            **kwargs,
        )


class WhisperOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis spec for the exported encoder/decoder inputs."""
        common_inputs = OrderedDict(
            [
                ("input_features", {0: "batch", 1: "feature_size", 2: "encoder_sequence"}),
            ]
        )
        if self.use_past:
            # With a cache, only one new decoder token is fed per step.
            common_inputs["decoder_input_ids"] = {0: "batch"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        sampling_rate: int = 22050,
        time_duration: float = 5.0,
        frequency: int = 220,
    ) -> Mapping[str, Any]:
        """Build dummy audio (encoder) + token (decoder) inputs for export tracing."""
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self,
            preprocessor=preprocessor.feature_extractor,
            batch_size=batch_size,
            framework=framework,
            sampling_rate=sampling_rate,
            time_duration=time_duration,
            frequency=frequency,
        )

        encoder_sequence_length = encoder_inputs["input_features"].shape[2]
        # The encoder halves the time axis; the decoder sees that length when caching.
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length

        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer, batch_size, seq_length, is_pair, framework
        )

        dummy_inputs["input_features"] = encoder_inputs.pop("input_features")
        dummy_inputs["decoder_input_ids"] = decoder_inputs.pop("decoder_input_ids")

        if "past_key_values" in decoder_inputs:
            dummy_inputs["past_key_values"] = decoder_inputs.pop("past_key_values")

        return dummy_inputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
328
1
'''simple docstring''' def SCREAMING_SNAKE_CASE__( ) -> Optional[int]: '''simple docstring''' UpperCamelCase__ = 0 for i in range(1 , 10_01 ): total += i**i return str(_UpperCamelCase )[-10:] if __name__ == "__main__": print(solution())
31
'''simple docstring''' def SCREAMING_SNAKE_CASE__( _UpperCamelCase : list ) -> float: '''simple docstring''' UpperCamelCase__ = 0 while len(_UpperCamelCase ) > 1: UpperCamelCase__ = 0 # Consider two files with minimum cost to be merged for _ in range(2 ): UpperCamelCase__ = files.index(min(_UpperCamelCase ) ) temp += files[min_index] files.pop(_UpperCamelCase ) files.append(_UpperCamelCase ) optimal_merge_cost += temp return optimal_merge_cost if __name__ == "__main__": import doctest doctest.testmod()
31
1
from __future__ import annotations def __A ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )-> list: """simple docstring""" _UpperCAmelCase = [] _UpperCAmelCase , _UpperCAmelCase = input_list[low:mid], input_list[mid : high + 1] while left and right: result.append((left if left[0] <= right[0] else right).pop(0 ) ) _UpperCAmelCase = result + left + right return input_list def __A ( __lowerCAmelCase )-> list: """simple docstring""" if len(__lowerCAmelCase ) <= 1: return input_list _UpperCAmelCase = list(__lowerCAmelCase ) # iteration for two-way merging _UpperCAmelCase = 2 while p <= len(__lowerCAmelCase ): # getting low, high and middle value for merge-sort of single list for i in range(0 , len(__lowerCAmelCase ) , __lowerCAmelCase ): _UpperCAmelCase = i _UpperCAmelCase = i + p - 1 _UpperCAmelCase = (low + high + 1) // 2 _UpperCAmelCase = merge(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) # final merge of last two parts if p * 2 >= len(__lowerCAmelCase ): _UpperCAmelCase = i _UpperCAmelCase = merge(__lowerCAmelCase , 0 , __lowerCAmelCase , len(__lowerCAmelCase ) - 1 ) break p *= 2 return input_list if __name__ == "__main__": _a = input('''Enter numbers separated by a comma:\n''').strip() if user_input == "": _a = [] else: _a = [int(item.strip()) for item in user_input.split(''',''')] print(iter_merge_sort(unsorted))
39
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) _a = {'''configuration_opt''': ['''OPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''OPTConfig''']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a = [ '''OPT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''OPTForCausalLM''', '''OPTModel''', '''OPTPreTrainedModel''', '''OPTForSequenceClassification''', '''OPTForQuestionAnswering''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a = ['''TFOPTForCausalLM''', '''TFOPTModel''', '''TFOPTPreTrainedModel'''] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a = [ '''FlaxOPTForCausalLM''', '''FlaxOPTModel''', '''FlaxOPTPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_opt import ( OPT_PRETRAINED_MODEL_ARCHIVE_LIST, OPTForCausalLM, OPTForQuestionAnswering, OPTForSequenceClassification, OPTModel, OPTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel else: import sys _a = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
39
1
"""SegFormer model configuration and its ONNX export configuration.

NOTE(review): the obfuscated original had every ``__init__`` parameter named
``__magic_name__`` (a SyntaxError), assigned attributes to a throwaway local,
and defined both classes under one name.  Identifiers are restored from the
visible right-hand sides; the deprecation warning category (collapsed to
``__magic_name__``) is restored as ``FutureWarning``.
"""
import warnings
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "nvidia/segformer-b0-finetuned-ade-512-512": (
        "https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"
    ),
    # See all SegFormer models at https://huggingface.co/models?filter=segformer
}


class SegformerConfig(PretrainedConfig):
    """Configuration for a SegFormer hierarchical Transformer encoder/decoder."""

    model_type = "segformer"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=256,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                "Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"
                " removed, as the behaviour will default to that of reshape_last_stage = True.",
                FutureWarning,
            )

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        # Deprecated flag, still honoured when passed explicitly.
        self.reshape_last_stage = kwargs.get("reshape_last_stage", True)
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class SegformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis spec for the exported pixel input."""
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
361
import json
import os
import shutil
import tempfile
import unittest

from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property

from ...test_tokenization_common import TokenizerTesterMixin


class CanineTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tests for the character-level CANINE tokenizer (no fast/rust variant)."""

    tokenizer_class = CanineTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        """Save a freshly constructed CanineTokenizer into the test temp dir."""
        super().setUp()
        tokenizer = CanineTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def canine_tokenizer(self):
        """Pretrained google/canine-s tokenizer, fetched once per test class."""
        return CanineTokenizer.from_pretrained("google/canine-s")

    def get_tokenizer(self, **kwargs) -> CanineTokenizer:
        tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
        # NOTE(review): the mangled source lost the attribute name here; upstream
        # caps the tokenizer at 1024 — confirm against the original test file.
        tokenizer.model_max_length = 1024
        return tokenizer

    @require_torch
    def test_prepare_batch_integration(self):
        """Batch-encode two sentences; check exact codepoint ids and tensor shapes."""
        tokenizer = self.canine_tokenizer
        src_text = ["Life is like a box of chocolates.", "You never know what you're gonna get."]
        # fmt: off
        expected_src_tokens = [5_7344, 76, 105, 102, 101, 32, 105, 115, 32, 108, 105, 107, 101, 32, 97, 32, 98, 111, 120, 32, 111, 102, 32, 99, 104, 111, 99, 111, 108, 97, 116, 101, 115, 46, 5_7345, 0, 0, 0, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors="pt")
        self.assertIsInstance(batch, BatchEncoding)

        result = list(batch.input_ids.numpy()[0])

        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 39), batch.input_ids.shape)
        self.assertEqual((2, 39), batch.attention_mask.shape)

    @require_torch
    def test_encoding_keys(self):
        tokenizer = self.canine_tokenizer
        src_text = ["Once there was a man.", "He wrote a test in HuggingFace Tranformers."]
        batch = tokenizer(src_text, padding=True, return_tensors="pt")
        # check if input_ids, attention_mask and token_type_ids are returned
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertIn("token_type_ids", batch)

    @require_torch
    def test_max_length_integration(self):
        tokenizer = self.canine_tokenizer
        tgt_text = [
            "What's the weater?",
            "It's about 25 degrees.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors="pt"
        )
        self.assertEqual(32, targets["input_ids"].shape[1])

    def test_save_and_load_tokenizer(self):
        """Round-trip save/load, with and without freshly added special tokens."""
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)

                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                additional_special_tokens = tokenizer.additional_special_tokens
                # We can add a new special token for Canine as follows:
                new_additional_special_token = chr(0xE007)
                additional_special_tokens.append(new_additional_special_token)
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn(new_additional_special_token, after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)

                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)

                shutil.rmtree(tmpdirname)

    def test_add_special_tokens(self):
        """Adding a private-use codepoint as cls_token round-trips through encode/decode."""
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input_text, ids = self.get_clean_sequence(tokenizer)

                # a special token for Canine can be defined as follows:
                SPECIAL_TOKEN = 0xE005
                special_token = chr(SPECIAL_TOKEN)

                tokenizer.add_special_tokens({"cls_token": special_token})
                encoded_special_token = tokenizer.encode(special_token, add_special_tokens=False)
                self.assertEqual(len(encoded_special_token), 1)

                text = tokenizer.decode(ids + encoded_special_token, clean_up_tokenization_spaces=False)
                encoded = tokenizer.encode(text, add_special_tokens=False)

                input_encoded = tokenizer.encode(input_text, add_special_tokens=False)
                special_token_id = tokenizer.encode(special_token, add_special_tokens=False)
                self.assertEqual(encoded, input_encoded + special_token_id)

                decoded = tokenizer.decode(encoded, skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)

    def test_tokenize_special_tokens(self):
        """Special tokens added via either API tokenize to a single token."""
        tokenizers = self.get_tokenizers(do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                SPECIAL_TOKEN_1 = chr(0xE005)
                SPECIAL_TOKEN_2 = chr(0xE006)

                # `add_tokens` method stores special tokens only in
                # `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
                tokenizer.add_tokens([SPECIAL_TOKEN_1], special_tokens=True)
                # `add_special_tokens` method stores special tokens in
                # `tokenizer.additional_special_tokens`, which also occur in
                # `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
                tokenizer.add_special_tokens({"additional_special_tokens": [SPECIAL_TOKEN_2]})

                token_1 = tokenizer.tokenize(SPECIAL_TOKEN_1)
                token_2 = tokenizer.tokenize(SPECIAL_TOKEN_2)

                self.assertEqual(len(token_1), 1)
                self.assertEqual(len(token_2), 1)
                self.assertEqual(token_1[0], SPECIAL_TOKEN_1)
                self.assertEqual(token_2[0], SPECIAL_TOKEN_2)

    @require_tokenizers
    def test_added_token_serializable(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # a special token for Canine can be defined as follows:
                NEW_TOKEN = 0xE006
                new_token = chr(NEW_TOKEN)

                new_token = AddedToken(new_token, lstrip=True)
                tokenizer.add_special_tokens({"additional_special_tokens": [new_token]})

                with tempfile.TemporaryDirectory() as tmp_dir_name:
                    tokenizer.save_pretrained(tmp_dir_name)
                    tokenizer.from_pretrained(tmp_dir_name)

    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        """Editing the on-disk JSON config changes the reloaded special tokens."""
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)

                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)

                # a special token for Canine can be defined as follows:
                NEW_TOKEN = 0xE006
                new_token_1 = chr(NEW_TOKEN)

                special_tokens_map["additional_special_tokens"] = [new_token_1]
                tokenizer_config["additional_special_tokens"] = [new_token_1]

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(tmp_dir, extra_ids=0)
                self.assertIn(new_token_1, tokenizer_without_change_in_init.additional_special_tokens)
                self.assertEqual(
                    [new_token_1],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_1])
                    ),
                )

                NEW_TOKEN = 0xE007
                new_token_2 = chr(NEW_TOKEN)
                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = [AddedToken(new_token_2, lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir, additional_special_tokens=new_added_tokens, extra_ids=0
                )

                self.assertIn(new_token_2, tokenizer.additional_special_tokens)
                self.assertEqual(
                    [new_token_2], tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_2]))
                )

    @require_tokenizers
    def test_encode_decode_with_spaces(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input = "hello world"
                if self.space_between_special_tokens:
                    output = "[CLS] hello world [SEP]"
                else:
                    output = input
                encoded = tokenizer.encode(input, add_special_tokens=False)
                decoded = tokenizer.decode(encoded, spaces_between_special_tokens=self.space_between_special_tokens)
                self.assertIn(decoded, [output, output.lower()])

    def test_tokenizers_common_ids_setters(self):
        """Special-token attributes can be (un)set through their *_id setters."""
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                attributes_list = [
                    "bos_token",
                    "eos_token",
                    "unk_token",
                    "sep_token",
                    "pad_token",
                    "cls_token",
                    "mask_token",
                ]

                token_to_test_setters = "a"
                token_id_to_test_setters = ord(token_to_test_setters)

                for attr in attributes_list:
                    setattr(tokenizer, attr + "_id", None)
                    self.assertEqual(getattr(tokenizer, attr), None)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), None)

                    setattr(tokenizer, attr + "_id", token_id_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr), token_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), token_id_to_test_setters)

                setattr(tokenizer, "additional_special_tokens_ids", [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [])

                additional_special_token_id = 0xE006
                additional_special_token = chr(additional_special_token_id)
                setattr(tokenizer, "additional_special_tokens_ids", [additional_special_token_id])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [additional_special_token])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [additional_special_token_id])

    # The following eight mixin tests do not apply to CANINE (it has no
    # conventional vocabulary) and are overridden as no-ops.
    # NOTE(review): the mangled source collapsed all method names, so this set of
    # overridden names is reconstructed best-effort — confirm against upstream.
    def test_get_vocab(self):
        pass

    def test_pretokenized_inputs(self):
        pass

    def test_conversion_reversible(self):
        pass

    def test_convert_tokens_to_string_format(self):
        pass

    def test_maximum_encoding_length_single_input(self):
        pass

    def test_maximum_encoding_length_pair_input(self):
        pass

    def test_np_encode_plus_sent_to_model(self):
        pass

    def test_torch_encode_plus_sent_to_model(self):
        pass
347
0
"""Image processor for LayoutLMv2: resizing plus optional Tesseract OCR."""
from typing import Dict, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends

if is_vision_available():
    import PIL

# soft dependency
if is_pytesseract_available():
    import pytesseract

logger = logging.get_logger(__name__)


def normalize_box(box, width, height):
    """Scale a (left, top, right, bottom) pixel box to the 0-1000 range used by LayoutLM."""
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]


def apply_tesseract(image, lang, tesseract_config=None):
    """Run Tesseract OCR on `image`; return (words, boxes) with boxes normalized to 0-1000."""
    tesseract_config = tesseract_config if tesseract_config is not None else ""

    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]

    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)

    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))

    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"

    return words, normalized_boxes


class LayoutLMv2ImageProcessor(BaseImageProcessor):
    """Resizes document images and, when `apply_ocr` is set, attaches OCR words/boxes."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: "PILImageResampling" = PILImageResampling.BILINEAR,
        apply_ocr: bool = True,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = "",
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: "PILImageResampling" = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize `image` to `size` (must contain "height" and "width" keys)."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        # `resize` here resolves to the module-level image_transforms helper.
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample=None,
        apply_ocr: bool = None,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> "PIL.Image.Image":
        """Preprocess a batch: optional resize, optional OCR, RGB->BGR flip, channel layout."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if apply_ocr:
            requires_backends(self, "pytesseract")
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        # flip color channels from RGB to BGR (as Detectron2 requires this)
        images = [flip_channel_order(image) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)

        if apply_ocr:
            data["words"] = words_batch
            data["boxes"] = boxes_batch
        return data
66
"""Tests for the Funnel Transformer tokenizers (slow and fast)."""
import os
import unittest

from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class FunnelTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Exercises FunnelTokenizer/FunnelTokenizerFast against a tiny WordPiece vocab."""

    tokenizer_class = FunnelTokenizer
    rust_tokenizer_class = FunnelTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        """Write a small WordPiece vocabulary into the test's temp dir."""
        super().setUp()

        vocab_tokens = [
            "<unk>",
            "<cls>",
            "<sep>",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        """Slow tokenizer loaded from the temp vocab."""
        return FunnelTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        """Fast tokenizer loaded from the temp vocab."""
        return FunnelTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        """Sample raw text and its expected detokenized form."""
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_token_type_ids(self):
        # Funnel assigns token type id 2 to the leading <cls> token.
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            inputs = tokenizer("UNwant\u00E9d,running")
            sentence_len = len(inputs["input_ids"]) - 1
            self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len)

            inputs = tokenizer("UNwant\u00E9d,running", "UNwant\u00E9d,running")
            self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len + [1] * sentence_len)
75
0
"""SentencePiece-based tokenizer for the BigBird model family."""
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model",
        "google/bigbird-roberta-large": (
            "https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"
        ),
        "google/bigbird-base-trivia-itc": (
            "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/bigbird-roberta-base": 4096,
    "google/bigbird-roberta-large": 4096,
    "google/bigbird-base-trivia-itc": 4096,
}


class BigBirdTokenizer(PreTrainedTokenizer):
    """BigBird tokenizer backed by a SentencePiece model.

    Sequences are framed as `[CLS] A [SEP]` or `[CLS] A [SEP] B [SEP]`.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        sep_token="[SEP]",
        mask_token="[MASK]",
        cls_token="[CLS]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sep_token=sep_token,
            mask_token=mask_token,
            cls_token=cls_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        # The SentencePiece processor is not picklable; drop it and reload on setstate.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        """Take as input a string and return a list of strings (tokens) for words/sub-words."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) in a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def _decode(
        self,
        token_ids: List[int],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = None,
        spaces_between_special_tokens: bool = True,
        **kwargs,
    ) -> str:
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)

        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)

        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))

        # Mimic the behavior of the Rust tokenizer:
        # No space before [MASK] and [SEP]
        if spaces_between_special_tokens:
            text = re.sub(r" (\[(MASK|SEP)\])", r"\1", " ".join(sub_texts))
        else:
            text = "".join(sub_texts)

        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy (or serialize) the SentencePiece model file into `save_directory`."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Build `[CLS] A [SEP]` or `[CLS] A [SEP] B [SEP]` from one or two sequences."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Return a mask with 1 at special-token positions, 0 at sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Token type ids: 0 for the first segment (incl. specials), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
268
"""simple docstring""" import torch from torch import nn from torch.nn import CrossEntropyLoss, MSELoss from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward from transformers.models.bert.modeling_bert import ( BERT_INPUTS_DOCSTRING, BERT_START_DOCSTRING, BertEmbeddings, BertLayer, BertPooler, BertPreTrainedModel, ) def _lowerCamelCase( a ): __a = torch.exp(a ) __a = torch.sum(a , dim=1 ) # sum of exp(x_i) __a = torch.sum(x * exp_x , dim=1 ) # sum of x_i * exp(x_i) return torch.log(a ) - B / A class snake_case__ ( nn.Module ): def __init__( self , lowerCamelCase ): super().__init__() __a = config.output_attentions __a = config.output_hidden_states __a = nn.ModuleList([BertLayer(lowerCamelCase ) for _ in range(config.num_hidden_layers )] ) __a = nn.ModuleList([BertHighway(lowerCamelCase ) for _ in range(config.num_hidden_layers )] ) __a = [-1 for _ in range(config.num_hidden_layers )] def a__ ( self , lowerCamelCase ): if (type(lowerCamelCase ) is float) or (type(lowerCamelCase ) is int): for i in range(len(self.early_exit_entropy ) ): __a = x else: __a = x def a__ ( self , lowerCamelCase ): __a = pooler.state_dict() for highway in self.highway: for name, param in highway.pooler.state_dict().items(): param.copy_(loaded_model[name] ) def a__ ( self , lowerCamelCase , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , ): __a = () __a = () __a = () for i, layer_module in enumerate(self.layer ): if self.output_hidden_states: __a = all_hidden_states + (hidden_states,) __a = layer_module( lowerCamelCase , lowerCamelCase , head_mask[i] , lowerCamelCase , lowerCamelCase ) __a = layer_outputs[0] if self.output_attentions: __a = all_attentions + (layer_outputs[1],) __a = (hidden_states,) if self.output_hidden_states: __a = current_outputs + (all_hidden_states,) if self.output_attentions: __a = current_outputs + (all_attentions,) __a = self.highway[i](lowerCamelCase ) # logits, pooled_output if not 
self.training: __a = highway_exit[0] __a = entropy(lowerCamelCase ) __a = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy __a = all_highway_exits + (highway_exit,) if highway_entropy < self.early_exit_entropy[i]: __a = (highway_logits,) + current_outputs[1:] + (all_highway_exits,) raise HighwayException(lowerCamelCase , i + 1 ) else: __a = all_highway_exits + (highway_exit,) # Add last layer if self.output_hidden_states: __a = all_hidden_states + (hidden_states,) __a = (hidden_states,) if self.output_hidden_states: __a = outputs + (all_hidden_states,) if self.output_attentions: __a = outputs + (all_attentions,) __a = outputs + (all_highway_exits,) return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits @add_start_docstrings( """The Bert Model transformer with early exiting (DeeBERT). """, snake_case_, ) class snake_case__ ( snake_case_ ): def __init__( self , lowerCamelCase ): super().__init__(lowerCamelCase ) __a = config __a = BertEmbeddings(lowerCamelCase ) __a = DeeBertEncoder(lowerCamelCase ) __a = BertPooler(lowerCamelCase ) self.init_weights() def a__ ( self ): self.encoder.init_highway_pooler(self.pooler ) def a__ ( self ): return self.embeddings.word_embeddings def a__ ( self , lowerCamelCase ): __a = value def a__ ( self , lowerCamelCase ): for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(lowerCamelCase ) @add_start_docstrings_to_model_forward(lowerCamelCase ) def a__ ( self , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , ): if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time" ) elif input_ids is not None: __a = input_ids.size() elif inputs_embeds is not None: __a = inputs_embeds.size()[:-1] else: raise ValueError("You have to 
specify either input_ids or inputs_embeds" ) __a = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: __a = torch.ones(lowerCamelCase , device=lowerCamelCase ) if encoder_attention_mask is None: __a = torch.ones(lowerCamelCase , device=lowerCamelCase ) if token_type_ids is None: __a = torch.zeros(lowerCamelCase , dtype=torch.long , device=lowerCamelCase ) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. __a = self.get_extended_attention_mask(lowerCamelCase , lowerCamelCase , lowerCamelCase ) # If a 2D ou 3D attention mask is provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] if encoder_attention_mask.dim() == 3: __a = encoder_attention_mask[:, None, :, :] if encoder_attention_mask.dim() == 2: __a = encoder_attention_mask[:, None, None, :] __a = encoder_extended_attention_mask.to( dtype=next(self.parameters() ).dtype ) # fp16 compatibility __a = (1.0 - encoder_extended_attention_mask) * -1_0000.0 # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] __a = self.get_head_mask(lowerCamelCase , self.config.num_hidden_layers ) __a = self.embeddings( input_ids=lowerCamelCase , position_ids=lowerCamelCase , token_type_ids=lowerCamelCase , inputs_embeds=lowerCamelCase ) __a = self.encoder( lowerCamelCase , attention_mask=lowerCamelCase , head_mask=lowerCamelCase , encoder_hidden_states=lowerCamelCase , encoder_attention_mask=lowerCamelCase , ) __a = encoder_outputs[0] __a = self.pooler(lowerCamelCase ) __a = ( sequence_output, pooled_output, ) + encoder_outputs[ 1: ] # add hidden_states and 
attentions if they are here return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits class snake_case__ ( snake_case_ ): def __init__( self , lowerCamelCase , lowerCamelCase ): __a = message __a = exit_layer # start from 1! class snake_case__ ( nn.Module ): def __init__( self , lowerCamelCase ): super().__init__() __a = BertPooler(lowerCamelCase ) __a = nn.Dropout(config.hidden_dropout_prob ) __a = nn.Linear(config.hidden_size , config.num_labels ) def a__ ( self , lowerCamelCase ): # Pooler __a = encoder_outputs[0] __a = self.pooler(lowerCamelCase ) # "return" pooler_output # BertModel __a = (pooler_input, pooler_output) + encoder_outputs[1:] # "return" bmodel_output # Dropout and classification __a = bmodel_output[1] __a = self.dropout(lowerCamelCase ) __a = self.classifier(lowerCamelCase ) return logits, pooled_output @add_start_docstrings( """Bert Model (with early exiting - DeeBERT) with a classifier on top, also takes care of multi-layer training. 
""", snake_case_, ) class snake_case__ ( snake_case_ ): def __init__( self , lowerCamelCase ): super().__init__(lowerCamelCase ) __a = config.num_labels __a = config.num_hidden_layers __a = DeeBertModel(lowerCamelCase ) __a = nn.Dropout(config.hidden_dropout_prob ) __a = nn.Linear(config.hidden_size , self.config.num_labels ) self.init_weights() @add_start_docstrings_to_model_forward(lowerCamelCase ) def a__ ( self , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=-1 , lowerCamelCase=False , ): __a = self.num_layers try: __a = self.bert( lowerCamelCase , attention_mask=lowerCamelCase , token_type_ids=lowerCamelCase , position_ids=lowerCamelCase , head_mask=lowerCamelCase , inputs_embeds=lowerCamelCase , ) # sequence_output, pooled_output, (hidden_states), (attentions), highway exits __a = outputs[1] __a = self.dropout(lowerCamelCase ) __a = self.classifier(lowerCamelCase ) __a = (logits,) + outputs[2:] # add hidden states and attention if they are here except HighwayException as e: __a = e.message __a = e.exit_layer __a = outputs[0] if not self.training: __a = entropy(lowerCamelCase ) __a = [] __a = [] if labels is not None: if self.num_labels == 1: # We are doing regression __a = MSELoss() __a = loss_fct(logits.view(-1 ) , labels.view(-1 ) ) else: __a = CrossEntropyLoss() __a = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) # work with highway exits __a = [] for highway_exit in outputs[-1]: __a = highway_exit[0] if not self.training: highway_logits_all.append(lowerCamelCase ) highway_entropy.append(highway_exit[2] ) if self.num_labels == 1: # We are doing regression __a = MSELoss() __a = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) ) else: __a = CrossEntropyLoss() __a = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) highway_losses.append(lowerCamelCase ) if train_highway: __a = 
(sum(highway_losses[:-1] ),) + outputs # exclude the final highway, of course else: __a = (loss,) + outputs if not self.training: __a = outputs + ((original_entropy, highway_entropy), exit_layer) if output_layer >= 0: __a = ( (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:] ) # use the highway of the last layer return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
268
1
"""Convert original DETA (Swin-L backbone) checkpoints to the HuggingFace format.

The original checkpoints live on the hub ("nielsr/deta-checkpoints" and
"jozhang97/deta-swin-l-o365"); this script renames their state-dict keys to the
HF naming scheme, splits fused qkv projections, verifies the converted model on
a COCO sample image, and optionally saves / pushes the result.
"""

import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image

from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging

logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_deta_config(model_name):
    """Build the DetaConfig (Swin-L backbone) matching the given checkpoint name."""
    backbone_config = SwinConfig(
        embed_dim=192,
        depths=(2, 2, 18, 2),
        num_heads=(6, 12, 24, 48),
        window_size=12,
        out_features=["stage2", "stage3", "stage4"],
    )

    config = DetaConfig(
        backbone_config=backbone_config,
        num_queries=900,
        encoder_ffn_dim=2048,
        decoder_ffn_dim=2048,
        num_feature_levels=5,
        assign_first_stage=True,
        with_box_refine=True,
        two_stage=True,
    )

    # set labels: the o365 checkpoint was trained on Object365, the other on COCO
    repo_id = "huggingface/label-files"
    if "o365" in model_name:
        num_labels = 366
        filename = "object365-id2label.json"
    else:
        num_labels = 91
        filename = "coco-detection-id2label.json"

    config.num_labels = num_labels
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config


def create_rename_keys(config):
    """Return a list of (old_key, new_key) pairs mapping original names to HF names."""
    rename_keys = []

    # stem
    # fmt: off
    rename_keys.append(("backbone.0.body.patch_embed.proj.weight", "model.backbone.model.embeddings.patch_embeddings.projection.weight"))
    rename_keys.append(("backbone.0.body.patch_embed.proj.bias", "model.backbone.model.embeddings.patch_embeddings.projection.bias"))
    rename_keys.append(("backbone.0.body.patch_embed.norm.weight", "model.backbone.model.embeddings.norm.weight"))
    rename_keys.append(("backbone.0.body.patch_embed.norm.bias", "model.backbone.model.embeddings.norm.bias"))
    # stages
    for i in range(len(config.backbone_config.depths)):
        for j in range(config.backbone_config.depths[i]):
            rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.norm1.weight", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight"))
            rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.norm1.bias", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias"))
            rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table"))
            rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index"))
            rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight"))
            rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias"))
            rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.norm2.weight", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight"))
            rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.norm2.bias", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias"))
            rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight"))
            rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias"))
            rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight"))
            rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias"))

        # the last stage has no downsample layer
        if i < 3:
            rename_keys.append((f"backbone.0.body.layers.{i}.downsample.reduction.weight", f"model.backbone.model.encoder.layers.{i}.downsample.reduction.weight"))
            rename_keys.append((f"backbone.0.body.layers.{i}.downsample.norm.weight", f"model.backbone.model.encoder.layers.{i}.downsample.norm.weight"))
            rename_keys.append((f"backbone.0.body.layers.{i}.downsample.norm.bias", f"model.backbone.model.encoder.layers.{i}.downsample.norm.bias"))

    rename_keys.append(("backbone.0.body.norm1.weight", "model.backbone.model.hidden_states_norms.stage2.weight"))
    rename_keys.append(("backbone.0.body.norm1.bias", "model.backbone.model.hidden_states_norms.stage2.bias"))
    rename_keys.append(("backbone.0.body.norm2.weight", "model.backbone.model.hidden_states_norms.stage3.weight"))
    rename_keys.append(("backbone.0.body.norm2.bias", "model.backbone.model.hidden_states_norms.stage3.bias"))
    rename_keys.append(("backbone.0.body.norm3.weight", "model.backbone.model.hidden_states_norms.stage4.weight"))
    rename_keys.append(("backbone.0.body.norm3.bias", "model.backbone.model.hidden_states_norms.stage4.bias"))

    # transformer encoder
    for i in range(config.encoder_layers):
        rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight", f"model.encoder.layers.{i}.self_attn.sampling_offsets.weight"))
        rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias", f"model.encoder.layers.{i}.self_attn.sampling_offsets.bias"))
        rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.attention_weights.weight", f"model.encoder.layers.{i}.self_attn.attention_weights.weight"))
        rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.attention_weights.bias", f"model.encoder.layers.{i}.self_attn.attention_weights.bias"))
        rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.value_proj.weight", f"model.encoder.layers.{i}.self_attn.value_proj.weight"))
        rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.value_proj.bias", f"model.encoder.layers.{i}.self_attn.value_proj.bias"))
        rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.output_proj.weight", f"model.encoder.layers.{i}.self_attn.output_proj.weight"))
        rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.output_proj.bias", f"model.encoder.layers.{i}.self_attn.output_proj.bias"))
        rename_keys.append((f"transformer.encoder.layers.{i}.norm1.weight", f"model.encoder.layers.{i}.self_attn_layer_norm.weight"))
        rename_keys.append((f"transformer.encoder.layers.{i}.norm1.bias", f"model.encoder.layers.{i}.self_attn_layer_norm.bias"))
        rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"model.encoder.layers.{i}.fc1.weight"))
        rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"model.encoder.layers.{i}.fc1.bias"))
        rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"model.encoder.layers.{i}.fc2.weight"))
        rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"model.encoder.layers.{i}.fc2.bias"))
        rename_keys.append((f"transformer.encoder.layers.{i}.norm2.weight", f"model.encoder.layers.{i}.final_layer_norm.weight"))
        rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"model.encoder.layers.{i}.final_layer_norm.bias"))

    # transformer decoder
    for i in range(config.decoder_layers):
        rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight", f"model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight"))
        rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias", f"model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias"))
        rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.attention_weights.weight", f"model.decoder.layers.{i}.encoder_attn.attention_weights.weight"))
        rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.attention_weights.bias", f"model.decoder.layers.{i}.encoder_attn.attention_weights.bias"))
        rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.value_proj.weight", f"model.decoder.layers.{i}.encoder_attn.value_proj.weight"))
        rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.value_proj.bias", f"model.decoder.layers.{i}.encoder_attn.value_proj.bias"))
        rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.output_proj.weight", f"model.decoder.layers.{i}.encoder_attn.output_proj.weight"))
        rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.output_proj.bias", f"model.decoder.layers.{i}.encoder_attn.output_proj.bias"))
        rename_keys.append((f"transformer.decoder.layers.{i}.norm1.weight", f"model.decoder.layers.{i}.encoder_attn_layer_norm.weight"))
        rename_keys.append((f"transformer.decoder.layers.{i}.norm1.bias", f"model.decoder.layers.{i}.encoder_attn_layer_norm.bias"))
        rename_keys.append((f"transformer.decoder.layers.{i}.self_attn.out_proj.weight", f"model.decoder.layers.{i}.self_attn.out_proj.weight"))
        rename_keys.append((f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"model.decoder.layers.{i}.self_attn.out_proj.bias"))
        rename_keys.append((f"transformer.decoder.layers.{i}.norm2.weight", f"model.decoder.layers.{i}.self_attn_layer_norm.weight"))
        rename_keys.append((f"transformer.decoder.layers.{i}.norm2.bias", f"model.decoder.layers.{i}.self_attn_layer_norm.bias"))
        rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"model.decoder.layers.{i}.fc1.weight"))
        rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"model.decoder.layers.{i}.fc1.bias"))
        rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"model.decoder.layers.{i}.fc2.weight"))
        rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"model.decoder.layers.{i}.fc2.bias"))
        rename_keys.append((f"transformer.decoder.layers.{i}.norm3.weight", f"model.decoder.layers.{i}.final_layer_norm.weight"))
        rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"model.decoder.layers.{i}.final_layer_norm.bias"))
    # fmt: on

    return rename_keys


def rename_key(dct, old, new):
    """Move ``dct[old]`` to ``dct[new]`` in place."""
    val = dct.pop(old)
    dct[new] = val


def read_in_swin_q_k_v(state_dict, backbone_config):
    """Split the fused Swin qkv projections into separate query/key/value tensors."""
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on


def read_in_decoder_q_k_v(state_dict, config):
    """Split the decoder self-attention fused in_proj into q/k/v projections."""
    hidden_size = config.d_model
    for i in range(config.decoder_layers):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]


def prepare_img():
    """Download the standard COCO sample image used to verify conversions."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_deta_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    """
    Copy/paste/tweak the original checkpoint's weights to our DETA structure,
    verify the outputs on a sample image, and optionally save / push to the hub.
    """
    config = get_deta_config(model_name)

    # load original state dict
    if model_name == "deta-swin-large":
        checkpoint_path = hf_hub_download(repo_id="nielsr/deta-checkpoints", filename="adet_swin_ft.pth")
    elif model_name == "deta-swin-large-o365":
        checkpoint_path = hf_hub_download(repo_id="jozhang97/deta-swin-l-o365", filename="deta_swin_pt_o365.pth")
    else:
        raise ValueError(f"Model name {model_name} not supported")

    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    # original state dict
    for name, param in state_dict.items():
        print(name, param.shape)

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)

    # fix some prefixes
    for key in state_dict.copy().keys():
        if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer.decoder", "model.decoder")] = val
        if "input_proj" in key:
            val = state_dict.pop(key)
            state_dict["model." + key] = val
        if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer", "model")] = val

    # finally, create HuggingFace model and load state dict
    model = DetaForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    device = "cuda" if torch.cuda.is_available() else "cpu"
    model.to(device)

    # load image processor
    processor = DetaImageProcessor(format="coco_detection")

    # verify our conversion on image
    img = prepare_img()
    encoding = processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values.to(device))

    # verify logits
    print("Logits:", outputs.logits[0, :3, :3])
    print("Boxes:", outputs.pred_boxes[0, :3, :3])
    if model_name == "deta-swin-large":
        expected_logits = torch.tensor(
            [[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]]
        )
        expected_boxes = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]])
    elif model_name == "deta-swin-large-o365":
        expected_logits = torch.tensor(
            [[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]]
        )
        expected_boxes = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]])
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits.to(device), atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes.to(device), atol=1e-4)
    print("Everything ok!")

    if pytorch_dump_folder_path:
        # Save model and processor
        logger.info(f"Saving PyTorch model and processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    # Push to hub
    if push_to_hub:
        print("Pushing model and processor to hub...")
        model.push_to_hub(f"jozhang97/{model_name}")
        processor.push_to_hub(f"jozhang97/{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_name",
        type=str,
        default="deta-swin-large",
        choices=["deta-swin-large", "deta-swin-large-o365"],
        help="Name of the model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        help="Path to the folder to output PyTorch model.",
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )
    args = parser.parse_args()
    convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
275
"""Unit tests for the greedy knapsack implementation."""

import unittest

from knapsack import greedy_knapsack as kp


class TestClass(unittest.TestCase):
    """Exercise ``kp.calc_profit`` on valid and invalid inputs.

    The original version passed an undefined name to ``assertRaisesRegex`` and
    never invoked ``calc_profit`` inside the assertion, so the error-path tests
    asserted nothing; they now use the context-manager form with a real call.
    """

    def test_sorted(self):
        """calc_profit returns the maximal profit for a solvable instance."""
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit, weight, max_weight), 210)

    def test_negative_max_weight(self):
        """A negative capacity must raise ValueError."""
        with self.assertRaisesRegex(ValueError, "max_weight must greater than zero."):
            kp.calc_profit([10, 20, 30], [2, 4, 6], -15)

    def test_negative_weight(self):
        """Any negative item weight must raise ValueError."""
        with self.assertRaisesRegex(ValueError, "Weight can not be negative."):
            kp.calc_profit([10, 20, 30], [2, -4, 6], 15)

    def test_negative_profit(self):
        """Any negative item profit must raise ValueError."""
        with self.assertRaisesRegex(ValueError, "Profit can not be negative."):
            kp.calc_profit([10, -20, 30], [2, 4, 6], 15)

    def test_null_max_weight(self):
        """A zero capacity must raise ValueError."""
        with self.assertRaisesRegex(ValueError, "max_weight must greater than zero."):
            kp.calc_profit([10, 20, 30], [2, 4, 6], 0)

    def test_unequal_list_length(self):
        """Mismatched profit/weight lengths must be rejected.

        NOTE(review): the reference implementation raises IndexError here —
        confirm against knapsack/greedy_knapsack.py.
        """
        with self.assertRaisesRegex(IndexError, "The length of profit and weight must be same."):
            kp.calc_profit([10, 20, 30, 40], [2, 4, 6], 100)


if __name__ == "__main__":
    unittest.main()
200
0
"""Tests for the Stable Diffusion XL img2img pipeline (fast CPU tests + slow GPU
integration tests)."""

import gc
import random
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DiffusionPipeline,
    EulerDiscreteScheduler,
    StableDiffusionXLImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..pipeline_params import (
    IMAGE_TO_IMAGE_IMAGE_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin

enable_full_determinism()


class StableDiffusionXLImg2ImgPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Fast, CPU-only tests built on tiny randomly initialized components."""

    pipeline_class = StableDiffusionXLImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        """Build a dict of tiny pipeline components with deterministic seeds."""
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            attention_head_dim=(2, 4),
            use_linear_projection=True,
            addition_embed_type="text_time",
            addition_time_embed_dim=8,
            transformer_layers_per_block=(1, 2),
            projection_class_embeddings_input_dim=80,
            cross_attention_dim=64,
        )
        scheduler = EulerDiscreteScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            steps_offset=1,
            beta_schedule="scaled_linear",
            timestep_spacing="leading",
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=32,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        # SDXL uses a second text encoder/tokenizer pair
        text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config)
        tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "text_encoder_2": text_encoder_2,
            "tokenizer_2": tokenizer_2,
            # "safety_checker": None,
            # "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Return deterministic call kwargs (image in [0, 1]) for the pipeline."""
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        # mps does not support device-bound generators
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 5.0,
            "output_type": "numpy",
            "strength": 0.75,
        }
        return inputs

    def test_stable_diffusion_xl_img2img_euler(self):
        """End-to-end run on CPU must reproduce a known image slice."""
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4656, 0.4840, 0.4439, 0.6698, 0.5574, 0.4524, 0.5799, 0.5943, 0.5165])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def test_save_load_optional_components(self):
        # optional components are covered elsewhere for this pipeline
        pass

    def test_stable_diffusion_xl_img2img_negative_prompt_embeds(self):
        """Passing pre-computed prompt embeds must match passing raw prompts."""
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        # forward without prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        output = sd_pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        # forward with prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        prompt = 3 * [inputs.pop("prompt")]

        (
            prompt_embeds,
            negative_prompt_embeds,
            pooled_prompt_embeds,
            negative_pooled_prompt_embeds,
        ) = sd_pipe.encode_prompt(prompt, negative_prompt=negative_prompt)

        output = sd_pipe(
            **inputs,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            pooled_prompt_embeds=pooled_prompt_embeds,
            negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
        )
        image_slice_2 = output.images[0, -3:, -3:, -1]

        # make sure that it's equal
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4


@slow
@require_torch_gpu
class StableDiffusionXLImg2ImgPipelineIntegrationTests(unittest.TestCase):
    """Slow GPU integration tests against a real pretrained checkpoint."""

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        # NOTE(review): original dtype default was mangled ("floataa"); float32
        # matches the fp32 latents built below — confirm against upstream.
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion(self):
        pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-base")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.49493, 0.47896, 0.40798, 0.54214, 0.53212, 0.48202, 0.47656, 0.46329, 0.48506])
        assert np.abs(image_slice - expected_slice).max() < 7e-3
295
import warnings

from ...utils import logging
from .image_processing_flava import FlavaImageProcessor


UpperCAmelCase_ = logging.get_logger(__name__)


class UpperCamelCase_(FlavaImageProcessor):
    """Deprecated alias of `FlavaImageProcessor`.

    Kept only for backward compatibility; emits a `FutureWarning` on
    construction and otherwise behaves exactly like the image processor.
    """

    def __init__(self, *args, **kwargs) -> None:
        # FIX: the base class was the undefined name `_lowerCamelCase`
        # (the import above shows FlavaImageProcessor is the intended base),
        # the signature reused one name for *args and **kwargs (SyntaxError),
        # and the warn() category argument was the args tuple instead of
        # FutureWarning.
        warnings.warn(
            'The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use FlavaImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
295
1
"""SEW-D model configuration (obfuscation-repaired)."""
import functools
import operator

from ...configuration_utils import PretrainedConfig
from ...utils import logging


UpperCamelCase__ = logging.get_logger(__name__)

# NOTE(review): the original bound this dict to the same name as the logger
# above (clobbering it); the rebinding is kept so importers of
# `UpperCamelCase__` still see the map.  The bogus `: Any` / `: Optional[int]`
# annotations were dropped — neither name was imported, so evaluating them
# raised NameError at import time.
UpperCamelCase__ = {
    "asapp/sew-d-tiny-100k": "https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json",
    # See all SEW-D models at https://huggingface.co/models?filter=sew-d
}


class SCREAMING_SNAKE_CASE(PretrainedConfig):
    """Configuration class for SEW-D models.

    FIX: the obfuscated original gave every __init__ parameter the same name
    (a SyntaxError) and assigned the values to throwaway locals instead of
    attributes, while later code reads `self.conv_dim` / `self.conv_stride` /
    `self.conv_kernel`.  Parameter names are restored from those attribute
    reads and from the super().__init__ token-id kwargs; the mangled class
    attribute `lowerCamelCase__` held the model-type string, restored as
    `model_type` so AutoConfig dispatch works.
    """

    model_type = "sew-d"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        squeeze_factor=2,
        max_position_embeddings=512,
        position_buckets=256,
        share_att_key=True,
        relative_attention=True,
        pos_att_type=("p2c", "c2p"),
        norm_rel_ebd="layer_norm",
        hidden_act="gelu_python",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        feature_layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.max_position_embeddings = max_position_embeddings
        self.position_buckets = position_buckets
        self.share_att_key = share_att_key
        self.relative_attention = relative_attention
        self.norm_rel_ebd = norm_rel_ebd
        self.pos_att_type = list(pos_att_type)
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layer_norm_eps = layer_norm_eps
        self.feature_layer_norm_eps = feature_layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            # Runtime strings kept byte-identical to the original.
            raise ValueError(
                '''Configuration for convolutional layers is incorrect.'''
                '''It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,'''
                f"""but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride)"""
                f"""= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."""
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        # Overall stride of the feature encoder: product of conv strides.
        # (The obfuscated original named this property `A`.)
        return functools.reduce(operator.mul, self.conv_stride, 1)
23
# NOTE(review): this file is a machine-obfuscated copy of the fairseq
# wav2vec2 -> transformers conversion script.  The obfuscation renamed every
# helper to `snake_case_`, every parameter to `_lowerCAmelCase` (duplicate
# parameter names in one signature are a SyntaxError) and every assignment
# target to `UpperCAmelCase`, while the call sites still use the original
# names (read_txt_into_dict, set_recursively, rename_dict, load_wavaveca_layer,
# load_conv_layer, recursively_load_weights, convert_wavaveca_checkpoint).
# The code below is therefore NOT runnable as written; the comments describe
# the apparent intent only.  TODO: restore identifiers from the upstream file.
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    WavaVecaConfig,
    WavaVecaCTCTokenizer,
    WavaVecaFeatureExtractor,
    WavaVecaForCTC,
    WavaVecaForPreTraining,
    WavaVecaProcessor,
    logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification

logging.set_verbosity_info()
# Module logger — immediately clobbered by the dict below (same mangled name).
UpperCamelCase__: int = logging.get_logger(__name__)
# fairseq -> transformers weight-name mapping (referenced below as MAPPING).
UpperCamelCase__: Dict = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "adapter_layer": "encoder.layers.*.adapter_layer",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
    "pooling_layer.linear": "projector",
    "pooling_layer.projection": "classifier",
}
# Keys living at the top level of the HF model (referenced as TOP_LEVEL_KEYS).
UpperCamelCase__: Optional[Any] = [
    "lm_head",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
    "projector",
    "classifier",
]


# intended name: read_txt_into_dict — parse "<label>" lines into an id->label dict.
# NOTE(review): enumerates the path string, not `file` — mangling artifact.
def snake_case_(_lowerCAmelCase: str) -> Union[str, Any]:
    UpperCAmelCase: Optional[int] = {}
    with open(_lowerCAmelCase, '''r''') as file:
        for line_number, line in enumerate(_lowerCAmelCase):
            UpperCAmelCase: List[str] = line.strip()
            if line:
                UpperCAmelCase: str = line.split()
                UpperCAmelCase: Union[str, Any] = line_number
                UpperCAmelCase: List[Any] = words[0]
                UpperCAmelCase: Union[str, Any] = value
    return result


# intended name: set_recursively(hf_pointer, key, value, full_name, weight_type)
# (the duplicate parameter names below are a SyntaxError — mangling artifact)
def snake_case_(_lowerCAmelCase: int, _lowerCAmelCase: Optional[Any], _lowerCAmelCase: Optional[Any], _lowerCAmelCase: Tuple, _lowerCAmelCase: str) -> int:
    for attribute in key.split('''.'''):
        UpperCAmelCase: Dict = getattr(_lowerCAmelCase, _lowerCAmelCase)

    UpperCAmelCase: Dict = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(_lowerCAmelCase):
            UpperCAmelCase: Any = PARAM_MAPPING[full_name.split('''.''')[-1]]
            UpperCAmelCase: Dict = '''param'''

    if weight_type is not None and weight_type != "param":
        UpperCAmelCase: Optional[int] = getattr(_lowerCAmelCase, _lowerCAmelCase).shape
    elif weight_type is not None and weight_type == "param":
        UpperCAmelCase: List[Any] = hf_pointer
        for attribute in hf_param_name.split('''.'''):
            UpperCAmelCase: Optional[Any] = getattr(_lowerCAmelCase, _lowerCAmelCase)
        UpperCAmelCase: int = shape_pointer.shape

        # let's reduce dimension
        UpperCAmelCase: Union[str, Any] = value[0]
    else:
        UpperCAmelCase: List[Any] = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
            f""" {value.shape} for {full_name}"""
        )

    if weight_type == "weight":
        UpperCAmelCase: int = value
    elif weight_type == "weight_g":
        UpperCAmelCase: str = value
    elif weight_type == "weight_v":
        UpperCAmelCase: Dict = value
    elif weight_type == "bias":
        UpperCAmelCase: str = value
    elif weight_type == "param":
        for attribute in hf_param_name.split('''.'''):
            UpperCAmelCase: int = getattr(_lowerCAmelCase, _lowerCAmelCase)
        UpperCAmelCase: Optional[int] = value
    else:
        UpperCAmelCase: Tuple = value

    logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""")


# intended name: rename_dict(key, value, full_name, weight_type, hf_dict)
def snake_case_(_lowerCAmelCase: Optional[int], _lowerCAmelCase: Any, _lowerCAmelCase: List[Any], _lowerCAmelCase: Dict, _lowerCAmelCase: List[Any]) -> List[Any]:
    UpperCAmelCase: List[str] = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(_lowerCAmelCase):
            UpperCAmelCase: List[str] = PARAM_MAPPING[full_name.split('''.''')[-1]]
            UpperCAmelCase: Any = '''param'''

    if weight_type is not None and weight_type != "param":
        UpperCAmelCase: Optional[int] = '''.'''.join([key, weight_type])
    elif weight_type is not None and weight_type == "param":
        UpperCAmelCase: Optional[int] = '''.'''.join([key, hf_param_name])
    else:
        UpperCAmelCase: List[Any] = key

    UpperCAmelCase: Tuple = value if '''lm_head''' in full_key else value[0]


# Adapter parameter-name mapping (referenced above as PARAM_MAPPING).
UpperCamelCase__: Tuple = {
    "W_a": "linear_1.weight",
    "W_b": "linear_2.weight",
    "b_a": "linear_1.bias",
    "b_b": "linear_2.bias",
    "ln_W": "norm.weight",
    "ln_b": "norm.bias",
}


# intended name: load_wavaveca_layer(name, value, hf_model=None, hf_dict=None)
def snake_case_(_lowerCAmelCase: str, _lowerCAmelCase: List[str], _lowerCAmelCase: Any=None, _lowerCAmelCase: Optional[Any]=None) -> int:
    UpperCAmelCase: List[Any] = False
    for key, mapped_key in MAPPING.items():
        UpperCAmelCase: int = '''wav2vec2.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
        if key in name or key.split('''w2v_model.''')[-1] == name.split('''.''')[0]:
            UpperCAmelCase: Optional[Any] = True
            if "*" in mapped_key:
                UpperCAmelCase: Tuple = name.split(_lowerCAmelCase)[0].split('''.''')[-2]
                UpperCAmelCase: List[Any] = mapped_key.replace('''*''', _lowerCAmelCase)
            if "weight_g" in name:
                UpperCAmelCase: str = '''weight_g'''
            elif "weight_v" in name:
                UpperCAmelCase: int = '''weight_v'''
            elif "bias" in name:
                UpperCAmelCase: int = '''bias'''
            elif "weight" in name:
                # TODO: don't match quantizer.weight_proj
                UpperCAmelCase: List[str] = '''weight'''
            else:
                UpperCAmelCase: Dict = None
            if hf_dict is not None:
                rename_dict(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase)
            else:
                set_recursively(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase)
            return is_used
    return is_used


# intended name: recursively_load_weights(fairseq_model, hf_model, is_headless)
def snake_case_(_lowerCAmelCase: Union[str, Any], _lowerCAmelCase: Optional[Any], _lowerCAmelCase: Any) -> Any:
    UpperCAmelCase: Dict = []
    UpperCAmelCase: Dict = fairseq_model.state_dict()
    UpperCAmelCase: Union[str, Any] = hf_model.wavaveca.feature_extractor

    for name, value in fairseq_dict.items():
        UpperCAmelCase: Dict = False
        if "conv_layers" in name:
            load_conv_layer(
                _lowerCAmelCase,
                _lowerCAmelCase,
                _lowerCAmelCase,
                _lowerCAmelCase,
                hf_model.config.feat_extract_norm == '''group''',
            )
            UpperCAmelCase: Any = True
        else:
            UpperCAmelCase: Optional[Any] = load_wavaveca_layer(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase)
        if not is_used:
            unused_weights.append(_lowerCAmelCase)

    logger.warning(f"""Unused weights: {unused_weights}""")


# intended name: load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm)
def snake_case_(_lowerCAmelCase: Optional[int], _lowerCAmelCase: Tuple, _lowerCAmelCase: Any, _lowerCAmelCase: Optional[int], _lowerCAmelCase: Optional[int]) -> Union[str, Any]:
    UpperCAmelCase: Any = full_name.split('''conv_layers.''')[-1]
    UpperCAmelCase: Optional[int] = name.split('''.''')
    UpperCAmelCase: Tuple = int(items[0])
    UpperCAmelCase: Tuple = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
                )
            UpperCAmelCase: Tuple = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
                )
            UpperCAmelCase: Union[str, Any] = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."""
                )
            UpperCAmelCase: Union[str, Any] = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."""
                )
            UpperCAmelCase: List[str] = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""")
    else:
        unused_weights.append(_lowerCAmelCase)


# intended name: convert_wavaveca_checkpoint(checkpoint_path, pytorch_dump_folder_path,
#     config_path=None, dict_path=None, is_finetuned=True, is_seq_class=False)
@torch.no_grad()
def snake_case_(_lowerCAmelCase: Union[str, Any], _lowerCAmelCase: List[Any], _lowerCAmelCase: Dict=None, _lowerCAmelCase: List[Any]=None, _lowerCAmelCase: int=True, _lowerCAmelCase: Optional[int]=False) -> Dict:
    if config_path is not None:
        UpperCAmelCase: List[str] = WavaVecaConfig.from_pretrained(_lowerCAmelCase)
    else:
        UpperCAmelCase: List[Any] = WavaVecaConfig()

    if is_seq_class:
        UpperCAmelCase: Optional[Any] = read_txt_into_dict(_lowerCAmelCase)
        UpperCAmelCase: Optional[int] = idalabel
        UpperCAmelCase: Optional[Any] = WavaVecaForSequenceClassification(_lowerCAmelCase)
        UpperCAmelCase: Dict = WavaVecaFeatureExtractor(
            feature_size=1,
            sampling_rate=16000,
            padding_value=0,
            do_normalize=_lowerCAmelCase,
            return_attention_mask=_lowerCAmelCase,
        )
        feature_extractor.save_pretrained(_lowerCAmelCase)
    elif is_finetuned:
        if dict_path:
            UpperCAmelCase: Dict = Dictionary.load(_lowerCAmelCase)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            UpperCAmelCase: Any = target_dict.pad_index
            UpperCAmelCase: Tuple = target_dict.bos_index
            UpperCAmelCase: Optional[int] = target_dict.eos_index
            UpperCAmelCase: Union[str, Any] = len(target_dict.symbols)
            UpperCAmelCase: Dict = os.path.join(_lowerCAmelCase, '''vocab.json''')
            if not os.path.isdir(_lowerCAmelCase):
                logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(_lowerCAmelCase))
                return
            os.makedirs(_lowerCAmelCase, exist_ok=_lowerCAmelCase)
            UpperCAmelCase: List[Any] = target_dict.indices

            # fairseq has the <pad> and <s> switched
            UpperCAmelCase: List[str] = 0
            UpperCAmelCase: List[str] = 1
            with open(_lowerCAmelCase, '''w''', encoding='''utf-8''') as vocab_handle:
                json.dump(_lowerCAmelCase, _lowerCAmelCase)
            UpperCAmelCase: Optional[int] = WavaVecaCTCTokenizer(
                _lowerCAmelCase,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token='''|''',
                do_lower_case=_lowerCAmelCase,
            )
            UpperCAmelCase: int = True if config.feat_extract_norm == '''layer''' else False
            UpperCAmelCase: int = WavaVecaFeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=_lowerCAmelCase,
                return_attention_mask=_lowerCAmelCase,
            )
            UpperCAmelCase: str = WavaVecaProcessor(feature_extractor=_lowerCAmelCase, tokenizer=_lowerCAmelCase)
            processor.save_pretrained(_lowerCAmelCase)
        UpperCAmelCase: Union[str, Any] = WavaVecaForCTC(_lowerCAmelCase)
    else:
        UpperCAmelCase: Dict = WavaVecaForPreTraining(_lowerCAmelCase)

    if is_finetuned or is_seq_class:
        UpperCAmelCase, UpperCAmelCase, UpperCAmelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''')[:-1])}
        )
    else:
        UpperCAmelCase: Optional[Any] = argparse.Namespace(task='''audio_pretraining''')
        UpperCAmelCase: List[Any] = fairseq.tasks.setup_task(_lowerCAmelCase)
        UpperCAmelCase, UpperCAmelCase, UpperCAmelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=_lowerCAmelCase)

    UpperCAmelCase: Optional[int] = model[0].eval()

    recursively_load_weights(_lowerCAmelCase, _lowerCAmelCase, not is_finetuned)

    hf_wavavec.save_pretrained(_lowerCAmelCase)


if __name__ == "__main__":
    UpperCamelCase__: Dict = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    parser.add_argument(
        "--is_seq_class",
        action="store_true",
        help="Whether the model to convert is a fine-tuned sequence classification model or not",
    )
    UpperCamelCase__: Any = parser.parse_args()

    UpperCamelCase__: int = not args.not_finetuned and not args.is_seq_class
    convert_wavaveca_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.dict_path,
        is_finetuned,
        args.is_seq_class,
    )
23
1
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


_lowercase = logging.get_logger(__name__)

# NOTE(review): the original rebound `_lowercase` (clobbering the logger);
# kept so existing importers of `_lowercase` still get the checkpoint map.
# Bogus `: Union[str, Any]` / `: str` module annotations were dropped —
# `Union` was never imported, so evaluating it raised NameError.
_lowercase = {
    "facebook/deit-base-distilled-patch16-224": (
        "https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json"
    ),
    # See all DeiT models at https://huggingface.co/models?filter=deit
}


class DeiTConfig(PretrainedConfig):
    """Configuration for a DeiT vision transformer.

    FIX: the obfuscated original named BOTH classes in this module
    `snake_case__` (making this one unreachable), gave every __init__
    parameter the same name `__lowercase` (SyntaxError), bound values to
    throwaway locals instead of attributes, and extended the undefined name
    `A__` instead of the imported `PretrainedConfig`.
    """

    model_type = "deit"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        encoder_stride=16,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride


class DeiTOnnxConfig(OnnxConfig):
    """ONNX export configuration for DeiT.

    Property names restored to the `OnnxConfig` API (`inputs`,
    `atol_for_validation`) — the original mangled both to the same name.
    """

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Single pixel_values input with dynamic batch/channel/spatial axes.
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4


# Backward-compatible alias: before this fix, the surviving binding of
# `snake_case__` was the second (ONNX) class, because it clobbered the first.
snake_case__ = DeiTOnnxConfig
266
from __future__ import annotations import math def lowerCAmelCase_ ( _lowercase : float , _lowercase : int) -> float: """simple docstring""" a__ : Union[str, Any] = u for i in range(1 , _lowercase): a__ : Optional[int] = temp * (u - i) return temp def lowerCAmelCase_ ( ) -> None: """simple docstring""" a__ : Tuple = int(input("""enter the numbers of values: """)) a__ : list[list[float]] = [] for _ in range(_lowercase): y.append([]) for i in range(_lowercase): for j in range(_lowercase): y[i].append(_lowercase) a__ : Optional[Any] = 0 print("""enter the values of parameters in a list: """) a__ : List[Any] = list(map(_lowercase , input().split())) print("""enter the values of corresponding parameters: """) for i in range(_lowercase): a__ : Optional[Any] = float(input()) a__ : Tuple = int(input("""enter the value to interpolate: """)) a__ : int = (value - x[0]) / (x[1] - x[0]) # for calculating forward difference table for i in range(1 , _lowercase): for j in range(n - i): a__ : int = y[j + 1][i - 1] - y[j][i - 1] a__ : Optional[int] = y[0][0] for i in range(1 , _lowercase): summ += (ucal(_lowercase , _lowercase) * y[0][i]) / math.factorial(_lowercase) print(F'''the value at {value} is {summ}''') if __name__ == "__main__": main()
266
1
# NOTE(review): machine-obfuscated copy of the examples/seq2seq GPU tests.
# All test methods were renamed to `UpperCAmelCase_` (later defs shadow the
# earlier ones inside each class), every local to `UpperCamelCase__`, and
# every argument to `__A`, while later statements still reference the
# original locals (data_cached, bash_script, env_vars_to_replace, output_dir,
# args, model, metrics, contents, ckpt, ...).  The base class `A__` is also
# undefined (presumably TestCasePlus, which IS imported).  These tests cannot
# run as written; the comments describe apparent intent only.
import argparse
import os
import sys
from unittest.mock import patch

import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json


# student checkpoint for the Marian en-ro fine-tuning test (aka MARIAN_MODEL)
lowerCamelCase_ = '''sshleifer/mar_enro_6_3_student'''


class __A(A__):
    """simple docstring"""

    # intended name: setUp — download/extract the wmt_en_ro dataset once
    def UpperCAmelCase_(self):
        super().setUp()
        UpperCamelCase__ = cached_path(
            """https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz""",
            extract_compressed_file=__A,
        )
        UpperCamelCase__ = F"{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k"

    # intended name: test_model_download — smoke-test the checkpoint load
    @slow
    @require_torch_gpu
    def UpperCAmelCase_(self):
        MarianMTModel.from_pretrained(__A)

    # intended name: test_train_mbart_cc25_enro_script — end-to-end fine-tune
    @slow
    @require_torch_gpu
    def UpperCAmelCase_(self):
        UpperCamelCase__ = {
            """$MAX_LEN""": 64,
            """$BS""": 64,
            """$GAS""": 1,
            """$ENRO_DIR""": self.data_dir,
            """facebook/mbart-large-cc25""": MARIAN_MODEL,
            # "val_check_interval=0.25": "val_check_interval=1.0",
            """--learning_rate=3e-5""": """--learning_rate 3e-4""",
            """--num_train_epochs 6""": """--num_train_epochs 1""",
        }

        # Clean up bash script
        UpperCamelCase__ = (self.test_file_dir / """train_mbart_cc25_enro.sh""").open().read().split("""finetune.py""")[1].strip()
        UpperCamelCase__ = bash_script.replace("""\\\n""", """""").strip().replace("""\"$@\"""", """""")
        for k, v in env_vars_to_replace.items():
            UpperCamelCase__ = bash_script.replace(__A, str(__A))
        UpperCamelCase__ = self.get_auto_remove_tmp_dir()

        # bash_script = bash_script.replace("--fp16 ", "")
        UpperCamelCase__ = F"\n --output_dir {output_dir}\n --tokenizer_name Helsinki-NLP/opus-mt-en-ro\n --sortish_sampler\n --do_predict\n --gpus 1\n --freeze_encoder\n --n_train 40000\n --n_val 500\n --n_test 500\n --fp16_opt_level O1\n --num_sanity_val_steps 0\n --eval_beams 2\n ".split()
        # XXX: args.gpus > 1 : handle multi_gpu in the future
        UpperCamelCase__ = ["""finetune.py"""] + bash_script.split() + args
        with patch.object(__A, """argv""", __A):
            UpperCamelCase__ = argparse.ArgumentParser()
            UpperCamelCase__ = pl.Trainer.add_argparse_args(__A)
            UpperCamelCase__ = SummarizationModule.add_model_specific_args(__A, os.getcwd())
            UpperCamelCase__ = parser.parse_args()
            UpperCamelCase__ = main(__A)

        # Check metrics
        UpperCamelCase__ = load_json(model.metrics_save_path)
        UpperCamelCase__ = metrics["""val"""][0]
        UpperCamelCase__ = metrics["""val"""][-1]
        self.assertEqual(len(metrics["""val"""]), (args.max_epochs / args.val_check_interval))
        assert isinstance(last_step_stats[F"val_avg_{model.val_metric}"], __A)

        self.assertGreater(last_step_stats["""val_avg_gen_time"""], 0.01)
        # model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
        self.assertLessEqual(last_step_stats["""val_avg_gen_time"""], 1.0)

        # test learning requirements:

        # 1. BLEU improves over the course of training by more than 2 pts
        self.assertGreater(last_step_stats["""val_avg_bleu"""] - first_step_stats["""val_avg_bleu"""], 2)

        # 2. BLEU finishes above 17
        self.assertGreater(last_step_stats["""val_avg_bleu"""], 17)

        # 3. test BLEU and val BLEU within ~1.1 pt.
        self.assertLess(abs(metrics["""val"""][-1]["""val_avg_bleu"""] - metrics["""test"""][-1]["""test_avg_bleu"""]), 1.1)

        # check lightning ckpt can be loaded and has a reasonable statedict
        UpperCamelCase__ = os.listdir(__A)
        UpperCamelCase__ = [x for x in contents if x.endswith(""".ckpt""")][0]
        UpperCamelCase__ = os.path.join(args.output_dir, __A)
        UpperCamelCase__ = torch.load(__A, map_location="""cpu""")
        UpperCamelCase__ = """model.model.decoder.layers.0.encoder_attn_layer_norm.weight"""
        assert expected_key in ckpt["state_dict"]
        assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa

        # TODO: turn on args.do_predict when PL bug fixed.
        if args.do_predict:
            UpperCamelCase__ = {os.path.basename(__A) for p in contents}
            assert "test_generations.txt" in contents
            assert "test_results.txt" in contents
            # assert len(metrics["val"]) == desired_n_evals
            assert len(metrics["""test"""]) == 1


class __A(A__):
    """simple docstring"""

    # intended name: test_distill_no_teacher — Marian distillation end-to-end
    @timeout_decorator.timeout(6_00)
    @slow
    @require_torch_gpu
    def UpperCAmelCase_(self):
        UpperCamelCase__ = F"{self.test_file_dir_str}/test_data/wmt_en_ro"
        UpperCamelCase__ = {
            """--fp16_opt_level=O1""": """""",
            """$MAX_LEN""": 1_28,
            """$BS""": 16,
            """$GAS""": 1,
            """$ENRO_DIR""": data_dir,
            """$m""": """sshleifer/student_marian_en_ro_6_1""",
            """val_check_interval=0.25""": """val_check_interval=1.0""",
        }

        # Clean up bash script
        UpperCamelCase__ = (
            (self.test_file_dir / """distil_marian_no_teacher.sh""").open().read().split("""distillation.py""")[1].strip()
        )
        UpperCamelCase__ = bash_script.replace("""\\\n""", """""").strip().replace("""\"$@\"""", """""")
        UpperCamelCase__ = bash_script.replace("""--fp16 """, """ """)
        for k, v in env_vars_to_replace.items():
            UpperCamelCase__ = bash_script.replace(__A, str(__A))
        UpperCamelCase__ = self.get_auto_remove_tmp_dir()
        UpperCamelCase__ = bash_script.replace("""--fp16""", """""")
        UpperCamelCase__ = 6
        UpperCamelCase__ = (
            ["""distillation.py"""]
            + bash_script.split()
            + [
                F"--output_dir={output_dir}",
                """--gpus=1""",
                """--learning_rate=1e-3""",
                F"--num_train_epochs={epochs}",
                """--warmup_steps=10""",
                """--val_check_interval=1.0""",
                """--do_predict""",
            ]
        )
        with patch.object(__A, """argv""", __A):
            UpperCamelCase__ = argparse.ArgumentParser()
            UpperCamelCase__ = pl.Trainer.add_argparse_args(__A)
            UpperCamelCase__ = SummarizationDistiller.add_model_specific_args(__A, os.getcwd())
            UpperCamelCase__ = parser.parse_args()
            # assert args.gpus == gpus THIS BREAKS for multi_gpu
            UpperCamelCase__ = distill_main(__A)

        # Check metrics
        UpperCamelCase__ = load_json(model.metrics_save_path)
        UpperCamelCase__ = metrics["""val"""][0]
        UpperCamelCase__ = metrics["""val"""][-1]
        assert len(metrics["""val"""]) >= (args.max_epochs / args.val_check_interval)  # +1 accounts for val_sanity_check
        assert last_step_stats["val_avg_gen_time"] >= 0.01

        assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"]  # model learned nothing
        assert 1.0 >= last_step_stats["val_avg_gen_time"]  # model hanging on generate. Maybe bad config was saved.
        assert isinstance(last_step_stats[F"val_avg_{model.val_metric}"], __A)

        # check lightning ckpt can be loaded and has a reasonable statedict
        UpperCamelCase__ = os.listdir(__A)
        UpperCamelCase__ = [x for x in contents if x.endswith(""".ckpt""")][0]
        UpperCamelCase__ = os.path.join(args.output_dir, __A)
        UpperCamelCase__ = torch.load(__A, map_location="""cpu""")
        UpperCamelCase__ = """model.model.decoder.layers.0.encoder_attn_layer_norm.weight"""
        assert expected_key in ckpt["state_dict"]
        assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa

        # TODO: turn on args.do_predict when PL bug fixed.
        if args.do_predict:
            UpperCamelCase__ = {os.path.basename(__A) for p in contents}
            assert "test_generations.txt" in contents
            assert "test_results.txt" in contents
            # assert len(metrics["val"]) == desired_n_evals
            assert len(metrics["""test"""]) == 1
244
"""simple docstring""" from ...configuration_utils import PretrainedConfig class _SCREAMING_SNAKE_CASE ( A__ ): UpperCAmelCase_ :str = "bert-generation" def __init__( self , __A=5_0358 , __A=1024 , __A=24 , __A=16 , __A=4096 , __A="gelu" , __A=0.1 , __A=0.1 , __A=512 , __A=0.0_2 , __A=1E-12 , __A=0 , __A=2 , __A=1 , __A="absolute" , __A=True , **__A , ) -> Tuple: super().__init__(pad_token_id=__A , bos_token_id=__A , eos_token_id=__A , **__A ) lowerCAmelCase_ :Any = vocab_size lowerCAmelCase_ :List[Any] = hidden_size lowerCAmelCase_ :Optional[int] = num_hidden_layers lowerCAmelCase_ :int = num_attention_heads lowerCAmelCase_ :List[Any] = hidden_act lowerCAmelCase_ :Optional[Any] = intermediate_size lowerCAmelCase_ :List[Any] = hidden_dropout_prob lowerCAmelCase_ :int = attention_probs_dropout_prob lowerCAmelCase_ :Tuple = max_position_embeddings lowerCAmelCase_ :List[str] = initializer_range lowerCAmelCase_ :Union[str, Any] = layer_norm_eps lowerCAmelCase_ :List[str] = position_embedding_type lowerCAmelCase_ :Optional[int] = use_cache
84
0
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union

import numpy as np
import torch

from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput


def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    """Create a beta schedule that discretizes the given alpha_t_bar function.

    Defines the cumulative product of (1 - beta) over time from t = [0, 1].

    Args:
        num_diffusion_timesteps: number of betas to produce.
        max_beta: cap applied to every beta (lower values prevent singularities).
        alpha_transform_type: noise-scaling transform, "cosine" or "exp".

    Returns:
        ``torch.Tensor`` of betas used by the scheduler to step the model outputs.

    Note: the original defined this function as ``a`` while the class below
    called it by the name ``betas_for_alpha_bar``; the real name is restored.
    """
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f'''Unsupported alpha_tranform_type: {alpha_transform_type}''')

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)


class _lowercase(SchedulerMixin, ConfigMixin):
    """KDPM2 discrete scheduler (second-order method from Karras et al. 2022).

    Every attribute/local in the original body was assigned to the single name
    ``a`` and every method was named ``UpperCamelCase_`` (so later defs shadowed
    earlier ones); the real targets — inferred from the names the code reads
    (``self.betas``, ``self.sigmas_interpol``, ``self.index_for_timestep``, …) —
    are restored.
    """

    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    # A second-order solver: each denoising step consumes two model evaluations.
    order = 2

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.00085,
        beta_end: float = 0.012,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        prediction_type: str = "epsilon",
        timestep_spacing: str = "linspace",
        steps_offset: int = 0,
    ):
        """Initialise the beta/alpha schedules and a default timestep grid."""
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f'''{beta_schedule} does is not implemented for {self.__class__}''')

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)

    def index_for_timestep(self, timestep, schedule_timesteps=None):
        """Map a timestep value to its index in the (interleaved) schedule."""
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps

        indices = (schedule_timesteps == timestep).nonzero()

        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter) == 0:
            pos = 1 if len(indices) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
            pos = self._index_counter[timestep_int]
        return indices[pos].item()

    @property
    def init_noise_sigma(self):
        """Standard deviation of the initial noise distribution."""
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()
        return (self.sigmas.max() ** 2 + 1) ** 0.5

    def scale_model_input(self, sample, timestep):
        """Scale the input sample by 1 / sqrt(sigma^2 + 1) for the current step."""
        step_index = self.index_for_timestep(timestep)

        # First-order half-step uses sigmas; the second uses interpolated sigmas.
        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
        else:
            sigma = self.sigmas_interpol[step_index]

        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample

    def set_timesteps(self, num_inference_steps, device=None, num_train_timesteps=None):
        """Compute the (interleaved) sigma and timestep schedules for inference.

        Args:
            num_inference_steps: number of denoising steps.
            device: device the schedules are moved to.
            num_train_timesteps: override for the training-timestep count.
        """
        self.num_inference_steps = num_inference_steps

        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps

        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=np.float32)[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.float32)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(np.float32)
            timesteps -= 1
        else:
            raise ValueError(
                f'''{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.'''
            )

        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        self.log_sigmas = torch.from_numpy(np.log(sigmas)).to(device)

        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)
        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
        sigmas = torch.from_numpy(sigmas).to(device=device)

        # interpolate sigmas
        sigmas_interpol = sigmas.log().lerp(sigmas.roll(1).log(), 0.5).exp()

        self.sigmas = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2), sigmas[-1:]])
        self.sigmas_interpol = torch.cat(
            [sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2), sigmas_interpol[-1:]]
        )

        if str(device).startswith("mps"):
            # mps does not support float64
            timesteps = torch.from_numpy(timesteps).to(device, dtype=torch.float32)
        else:
            timesteps = torch.from_numpy(timesteps).to(device)

        # interpolate timesteps
        timesteps_interpol = self.sigma_to_t(sigmas_interpol).to(device, dtype=timesteps.dtype)
        interleaved_timesteps = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]), dim=-1).flatten()
        self.timesteps = torch.cat([timesteps[:1], interleaved_timesteps])

        self.sample = None

        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int)

    def sigma_to_t(self, sigma):
        """Invert the sigma schedule: interpolate a (possibly fractional) timestep."""
        log_sigma = sigma.log()

        # get distribution
        dists = log_sigma - self.log_sigmas[:, None]

        # get sigmas range
        low_idx = dists.ge(0).cumsum(dim=0).argmax(dim=0).clamp(max=self.log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1

        low = self.log_sigmas[low_idx]
        high = self.log_sigmas[high_idx]

        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = w.clamp(0, 1)

        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.view(sigma.shape)
        return t

    @property
    def state_in_first_order(self):
        # No stored sample means we are at the first (Euler) half-step.
        return self.sample is None

    def step(self, model_output, timestep, sample, return_dict=True):
        """Advance the diffusion process by one (half-)step.

        Returns a ``SchedulerOutput`` (or a 1-tuple when ``return_dict=False``)
        holding the previous (less noisy) sample.
        """
        step_index = self.index_for_timestep(timestep)

        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
        self._index_counter[timestep_int] += 1

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_interpol = self.sigmas_interpol[step_index + 1]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / KDPM2's method
            sigma = self.sigmas[step_index - 1]
            sigma_interpol = self.sigmas_interpol[step_index]
            sigma_next = self.sigmas[step_index]

        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now

        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            raise NotImplementedError("prediction_type not implemented yet: sample")
        else:
            raise ValueError(
                f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`'''
            )

        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_interpol - sigma_hat

            # store for 2nd order step
            self.sample = sample
        else:
            # DPM-Solver-2
            # 2. Convert to an ODE derivative for 2nd order
            derivative = (sample - pred_original_sample) / sigma_interpol

            # 3. delta timestep
            dt = sigma_next - sigma_hat

            sample = self.sample
            self.sample = None

        prev_sample = sample + derivative * dt

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def add_noise(self, original_samples, noise, timesteps):
        """Forward-diffuse ``original_samples`` to the noise levels of ``timesteps``."""
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)

        step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]

        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)

        noisy_samples = original_samples + noise * sigma
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
71
def a() -> str:
    """Return the last ten digits of 1**1 + 2**2 + ... + 1000**1000 (Project Euler 48).

    The original returned ``str(A)[-10:]`` where ``A`` was undefined; the
    accumulator is the value being truncated.
    """
    total = 0
    for i in range(1, 1001):
        total += i**i
    return str(total)[-10:]


if __name__ == "__main__":
    # The original called the undefined name ``solution()``; the entry point is ``a``.
    print(a())
71
1
"""simple docstring"""
import os
import zipfile

import pytest

from datasets.utils.extract import (
    Bzip2Extractor,
    Extractor,
    GzipExtractor,
    Lz4Extractor,
    SevenZipExtractor,
    TarExtractor,
    XzExtractor,
    ZipExtractor,
    ZstdExtractor,
)

from .utils import require_lz4, require_py7zr, require_zstandard


# Every def below was named identically (``snake_case_``) in the original, so
# later definitions shadowed earlier ones and pytest could not resolve the
# fixtures by name; distinct names are restored, inferred from the parametrize
# ids and the names the bodies read.
@pytest.mark.parametrize(
    "compression_format, is_archive",
    [
        ("7z", True),
        ("bz2", False),
        ("gzip", False),
        ("lz4", False),
        ("tar", True),
        ("xz", False),
        ("zip", True),
        ("zstd", False),
    ],
)
def test_base_extractors(
    compression_format,
    is_archive,
    bz2_file,
    gz_file,
    lz4_file,
    seven_zip_file,
    tar_file,
    xz_file,
    zip_file,
    zstd_file,
    tmp_path,
    text_file,
):
    """Each concrete extractor recognises and extracts its own format."""
    input_paths_and_base_extractors = {
        "7z": (seven_zip_file, SevenZipExtractor),
        "bz2": (bz2_file, Bzip2Extractor),
        "gzip": (gz_file, GzipExtractor),
        "lz4": (lz4_file, Lz4Extractor),
        "tar": (tar_file, TarExtractor),
        "xz": (xz_file, XzExtractor),
        "zip": (zip_file, ZipExtractor),
        "zstd": (zstd_file, ZstdExtractor),
    }
    input_path, base_extractor = input_paths_and_base_extractors[compression_format]
    if input_path is None:
        # The fixture returns None when the optional compression library is missing.
        reason = f"for '{compression_format}' compression_format, "
        if compression_format == "7z":
            reason += require_py7zr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    assert base_extractor.is_extractable(input_path)
    output_path = tmp_path / ("extracted" if is_archive else "extracted.txt")
    base_extractor.extract(input_path, output_path)
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding="utf-8")
    else:
        extracted_file_content = output_path.read_text(encoding="utf-8")
    expected_file_content = text_file.read_text(encoding="utf-8")
    assert extracted_file_content == expected_file_content


@pytest.mark.parametrize(
    "compression_format, is_archive",
    [
        ("7z", True),
        ("bz2", False),
        ("gzip", False),
        ("lz4", False),
        ("tar", True),
        ("xz", False),
        ("zip", True),
        ("zstd", False),
    ],
)
def test_extractor(
    compression_format,
    is_archive,
    bz2_file,
    gz_file,
    lz4_file,
    seven_zip_file,
    tar_file,
    xz_file,
    zip_file,
    zstd_file,
    tmp_path,
    text_file,
):
    """The generic ``Extractor`` infers the format and dispatches correctly."""
    input_paths = {
        "7z": seven_zip_file,
        "bz2": bz2_file,
        "gzip": gz_file,
        "lz4": lz4_file,
        "tar": tar_file,
        "xz": xz_file,
        "zip": zip_file,
        "zstd": zstd_file,
    }
    input_path = input_paths[compression_format]
    if input_path is None:
        reason = f"for '{compression_format}' compression_format, "
        if compression_format == "7z":
            reason += require_py7zr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    extractor_format = Extractor.infer_extractor_format(input_path)
    assert extractor_format is not None
    output_path = tmp_path / ("extracted" if is_archive else "extracted.txt")
    Extractor.extract(input_path, output_path, extractor_format)
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding="utf-8")
    else:
        extracted_file_content = output_path.read_text(encoding="utf-8")
    expected_file_content = text_file.read_text(encoding="utf-8")
    assert extracted_file_content == expected_file_content


@pytest.fixture
def tar_file_with_dot_dot(tmp_path, text_file):
    """Tar archive whose member path escapes the extraction dir via ``..``."""
    import tarfile

    directory = tmp_path / "data_dot_dot"
    directory.mkdir()
    path = directory / "tar_file_with_dot_dot.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(text_file, arcname=os.path.join("..", text_file.name))
    return path


@pytest.fixture
def tar_file_with_sym_link(tmp_path):
    """Tar archive containing a symlink pointing outside the archive root."""
    import tarfile

    directory = tmp_path / "data_sym_link"
    directory.mkdir()
    path = directory / "tar_file_with_sym_link.tar"
    os.symlink("..", directory / "subdir", target_is_directory=True)
    with tarfile.TarFile(path, "w") as f:
        f.add(str(directory / "subdir"), arcname="subdir")  # str required by os.readlink on Windows and Python < 3.8
    return path


@pytest.mark.parametrize(
    "insecure_tar_file, error_log",
    [("tar_file_with_dot_dot", "illegal path"), ("tar_file_with_sym_link", "Symlink")],
)
def test_tar_extract_insecure_files(
    insecure_tar_file, error_log, tar_file_with_dot_dot, tar_file_with_sym_link, tmp_path, caplog
):
    """Insecure tar members are refused and logged as errors, not extracted."""
    insecure_tar_files = {
        "tar_file_with_dot_dot": tar_file_with_dot_dot,
        "tar_file_with_sym_link": tar_file_with_sym_link,
    }
    insecure_tar_file_path = insecure_tar_files[insecure_tar_file]
    output_path = tmp_path / "extracted"
    TarExtractor.extract(insecure_tar_file_path, output_path)
    assert caplog.text
    for record in caplog.records:
        assert record.levelname == "ERROR"
        assert error_log in record.msg


def test_is_zipfile_false_positive(tmpdir):
    """``zipfile.is_zipfile`` false positive is rejected by ``ZipExtractor``."""
    not_a_zip_file = tmpdir / "not_a_zip_file"
    # From: https://github.com/python/cpython/pull/5053
    data = (
        b"\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00"
        b"\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6'\x00\x00\x00\x15I"
        b"DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07"
        b"\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82"
    )
    with not_a_zip_file.open("wb") as f:
        f.write(data)
    assert zipfile.is_zipfile(str(not_a_zip_file))  # is a false positive for `zipfile`
    assert not ZipExtractor.is_extractable(not_a_zip_file)  # but we're right
72
class Graph:
    """Undirected weighted graph with a static Borůvka MST solver.

    The original declared two classes both named ``A`` while the code itself
    read ``Graph``, ``Graph.UnionFind`` and ``Graph.build``; the names the code
    reads are restored, with ``UnionFind`` nested so those references resolve.
    """

    def __init__(self):
        self.num_vertices = 0
        self.num_edges = 0
        # adjacency[u][v] == weight of edge (u, v); stored in both directions.
        self.adjacency = {}

    def add_vertex(self, vertex):
        """Register ``vertex`` if it is not already present."""
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1

    def add_edge(self, head, tail, weight):
        """Add the undirected edge (head, tail) with the given weight.

        Self-loops are ignored. The original assigned the weight to a throwaway
        local instead of the adjacency dict, so edges were never recorded.
        """
        self.add_vertex(head)
        self.add_vertex(tail)
        if head == tail:
            return
        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight

    def distinct_weight(self):
        """Rewrite edge weights so that all weights are pairwise distinct.

        Borůvka's algorithm requires distinct weights to guarantee a unique MST.
        """
        edges = self.get_edges()
        # Drop the reverse duplicate of each undirected edge.
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight))
        for i in range(len(edges)):
            edges[i] = list(edges[i])
        edges.sort(key=lambda e: e[2])
        # Bump equal neighbours so the sorted sequence is strictly increasing.
        for i in range(len(edges) - 1):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1
        for edge in edges:
            head, tail, weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight

    def __str__(self):
        string = ""
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += f'''{head} -> {tail} == {weight}\n'''
        return string.rstrip("\n")

    def get_edges(self):
        """Return every directed (tail, head, weight) triple (both directions)."""
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]))
        return output

    def get_vertices(self):
        return self.adjacency.keys()

    @staticmethod
    def build(vertices=None, edges=None):
        """Construct a Graph from iterable of vertices and (head, tail, weight) edges."""
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex)
        for edge in edges:
            g.add_edge(*edge)
        return g

    class UnionFind:
        """Disjoint-set forest with path compression and union by rank."""

        def __init__(self):
            self.parent = {}
            self.rank = {}

        def __len__(self):
            return len(self.parent)

        def make_set(self, item):
            """Create a singleton set for ``item`` (idempotent)."""
            if item in self.parent:
                return self.find(item)
            self.parent[item] = item
            self.rank[item] = 0
            return item

        def find(self, item):
            """Return the representative of ``item``'s set, compressing paths."""
            if item not in self.parent:
                return self.make_set(item)
            if item != self.parent[item]:
                self.parent[item] = self.find(self.parent[item])
            return self.parent[item]

        def union(self, item1, item2):
            """Merge the sets of the two items; return the new root."""
            root1 = self.find(item1)
            root2 = self.find(item2)
            if root1 == root2:
                return root1
            if self.rank[root1] > self.rank[root2]:
                self.parent[root2] = root1
                return root1
            if self.rank[root1] < self.rank[root2]:
                self.parent[root1] = root2
                return root2
            if self.rank[root1] == self.rank[root2]:
                self.rank[root1] += 1
                self.parent[root2] = root1
                return root1
            return None

    @staticmethod
    def boruvka_mst(graph):
        """Return the minimum spanning tree of ``graph`` (distinct weights assumed).

        Repeatedly picks each component's cheapest outgoing edge and merges
        components until one remains.
        """
        num_components = graph.num_vertices
        union_find = Graph.UnionFind()
        mst_edges = []
        # Initialise so a single-component graph still returns a valid (empty) MST.
        mst = Graph.build(edges=mst_edges)
        while num_components > 1:
            cheap_edge = {}
            for vertex in graph.get_vertices():
                cheap_edge[vertex] = -1

            edges = graph.get_edges()
            # Drop the reverse duplicate of each undirected edge.
            for edge in edges:
                head, tail, weight = edge
                edges.remove((tail, head, weight))
            for edge in edges:
                head, tail, weight = edge
                set1 = union_find.find(head)
                set2 = union_find.find(tail)
                if set1 != set2:
                    if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
                        cheap_edge[set1] = [head, tail, weight]
                    if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
                        cheap_edge[set2] = [head, tail, weight]
            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    head, tail, weight = cheap_edge[vertex]
                    if union_find.find(head) != union_find.find(tail):
                        union_find.union(head, tail)
                        mst_edges.append(cheap_edge[vertex])
                        num_components = num_components - 1
            mst = Graph.build(edges=mst_edges)
        return mst
305
0
import inspect
import re

from transformers.utils import direct_transformers_import


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
# (The original bound this to a placeholder name while reading ``transformers`` below.)
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")

CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    "DecisionTransformerConfig",
    "EncoderDecoderConfig",
    "MusicgenConfig",
    "RagConfig",
    "SpeechEncoderDecoderConfig",
    "TimmBackboneConfig",
    "VisionEncoderDecoderConfig",
    "VisionTextDualEncoderConfig",
    "LlamaConfig",
}


def get_checkpoint_from_config_class(config_class):
    """Return the first checkpoint name whose docstring link matches its name, or None."""
    checkpoint = None

    # source code of `config_class`
    config_source = inspect.getsource(config_class)
    checkpoints = _re_checkpoint.findall(config_source)

    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith("/"):
            ckpt_link = ckpt_link[:-1]

        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = f"""https://huggingface.co/{ckpt_name}"""
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break

    return checkpoint


def check_config_docstrings_have_checkpoints():
    """Raise ValueError listing every config class whose docstring lacks a valid checkpoint."""
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class)

        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"""The following configurations don't contain any valid checkpoint:\n{message}""")


if __name__ == "__main__":
    check_config_docstrings_have_checkpoints()
351
import datasets

from .evaluate import evaluate


# The three module constants below were assigned to placeholder names in the
# original while the decorator and `_info` read `_CITATION` / `_DESCRIPTION` /
# `_KWARGS_DESCRIPTION`; the names the code reads are restored (string contents
# are reproduced byte-for-byte).
_CITATION = "\\n@inproceedings{Rajpurkar2016SQuAD10,\n title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},\n author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},\n booktitle={EMNLP},\n year={2016}\n}\n"

_DESCRIPTION = "\nThis metric wrap the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).\n\nStanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by\ncrowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,\nfrom the corresponding reading passage, or the question might be unanswerable.\n"

_KWARGS_DESCRIPTION = "\nComputes SQuAD scores (F1 and EM).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair as given in the references (see below)\n - 'prediction_text': the text of the answer\n references: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair (see above),\n - 'answers': a Dict in the SQuAD dataset format\n {\n 'text': list of possible texts for the answer, as a list of strings\n 'answer_start': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n 'exact_match': Exact match (the normalized answer exactly match the gold answer)\n 'f1': The F-score of predicted tokens versus the gold answer\nExamples:\n\n >>> predictions = [{'prediction_text': '1976', 'id': '56e10a3be3433e1400422b22'}]\n >>> references = [{'answers': {'answer_start': [97], 'text': ['1976']}, 'id': '56e10a3be3433e1400422b22'}]\n >>> squad_metric = datasets.load_metric(\"squad\")\n >>> results = squad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'exact_match': 100.0, 'f1': 100.0}\n"


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class UpperCamelCase(datasets.Metric):
    """SQuAD v1 metric: wraps the official scoring script (exact match + F1).

    Both methods were named ``__A`` in the original (the second silently
    shadowing the first); the ``datasets.Metric`` contract requires them to be
    named ``_info`` and ``_compute``.
    """

    def _info(self):
        # Declares the expected feature schema of predictions and references.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": {"id": datasets.Value("string"), "prediction_text": datasets.Value("string")},
                    "references": {
                        "id": datasets.Value("string"),
                        "answers": datasets.features.Sequence(
                            {
                                "text": datasets.Value("string"),
                                "answer_start": datasets.Value("int32"),
                            }
                        ),
                    },
                }
            ),
            codebase_urls=["https://rajpurkar.github.io/SQuAD-explorer/"],
            reference_urls=["https://rajpurkar.github.io/SQuAD-explorer/"],
        )

    def _compute(self, predictions, references):
        """Reshape inputs into the official SQuAD format and delegate scoring."""
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
198
0