Column schema (field, dtype, observed range):

    code                      string   (length 87 to 55.2k)
    code_codestyle            int64    (0 to 349)
    style_context             string   (length 135 to 49.1k)
    style_context_codestyle   int64    (0 to 349)
    label                     int64    (0 to 1)
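The rows below read like a pairwise code-style matching task: each pairs a `code` sample with a `style_context` sample, each tagged with a numeric codestyle id, plus a binary `label`. As a minimal sketch of loading such a dataset for inspection with the `datasets` library; the identifier `user/code-style-pairs` and the `train` split are hypothetical placeholders, since the source does not name the dataset:

```python
# Minimal sketch, assuming the rows below come from a Hugging Face dataset
# with the schema above. "user/code-style-pairs" and the "train" split are
# hypothetical placeholders; substitute the real dataset name.
from datasets import load_dataset

ds = load_dataset("user/code-style-pairs", split="train")
row = ds[0]
print(row["code_codestyle"], row["style_context_codestyle"], row["label"])
print(row["code"][:200])  # first 200 characters of the code cell
```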
Row 1

code:
```python
import os
import sys
import unittest

lowerCAmelCase = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))

import check_dummies  # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init  # noqa: E402

# Align TRANSFORMERS_PATH in check_dummies with the current path
lowerCAmelCase = os.path.join(git_repo_path, '''src''', '''transformers''')

lowerCAmelCase = '''
{0} = None
'''

lowerCAmelCase = '''
class {0}(metaclass=DummyObject):
    _backends = {1}

    def __init__(self, *args, **kwargs):
        requires_backends(self, {1})
'''

lowerCAmelCase = '''
def {0}(*args, **kwargs):
    requires_backends({0}, {1})
'''


class A(unittest.TestCase):
    def _A(self):
        __lowercase = find_backend(' _import_structure["models.albert"].append("AlbertTokenizerFast")')
        self.assertIsNone(lowerCAmelCase)
        __lowercase = find_backend(' if not is_tokenizers_available():')
        self.assertEqual(lowerCAmelCase, 'tokenizers')
        __lowercase = find_backend(' if not is_tensorflow_text_available():')
        self.assertEqual(lowerCAmelCase, 'tensorflow_text')
        __lowercase = find_backend(' if not (is_sentencepiece_available() and is_tokenizers_available()):')
        self.assertEqual(lowerCAmelCase, 'sentencepiece_and_tokenizers')
        __lowercase = find_backend(' if not (is_sentencepiece_available() and is_tensorflow_text_available()):')
        self.assertEqual(lowerCAmelCase, 'sentencepiece_and_tensorflow_text')
        __lowercase = find_backend(
            ' if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):')
        self.assertEqual(lowerCAmelCase, 'sentencepiece_and_tokenizers_and_vision')

    def _A(self):
        __lowercase = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn('torch', lowerCAmelCase)
        self.assertIn('tensorflow_text', lowerCAmelCase)
        self.assertIn('sentencepiece_and_tokenizers', lowerCAmelCase)
        # Likewise, we can't assert on the exact content of a key
        self.assertIn('BertModel', objects['torch'])
        self.assertIn('TFBertModel', objects['tf'])
        self.assertIn('FlaxBertModel', objects['flax'])
        self.assertIn('BertModel', objects['torch'])
        self.assertIn('TFBertTokenizer', objects['tensorflow_text'])
        self.assertIn('convert_slow_tokenizer', objects['sentencepiece_and_tokenizers'])

    def _A(self):
        __lowercase = create_dummy_object('CONSTANT', '\'torch\'')
        self.assertEqual(lowerCAmelCase, '\nCONSTANT = None\n')
        __lowercase = create_dummy_object('function', '\'torch\'')
        self.assertEqual(
            lowerCAmelCase, '\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n')
        __lowercase = '\nclass FakeClass(metaclass=DummyObject):\n _backends = \'torch\'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, \'torch\')\n'
        __lowercase = create_dummy_object('FakeClass', '\'torch\'')
        self.assertEqual(lowerCAmelCase, lowerCAmelCase)

    def _A(self):
        __lowercase = '# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, ["torch"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = ["torch"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, ["torch"])\n'
        __lowercase = create_dummy_files({'torch': ['CONSTANT', 'function', 'FakeClass']})
        self.assertEqual(dummy_files['torch'], lowerCAmelCase)
```
code_codestyle: 295

style_context:
```python
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging

lowerCAmelCase = '''▁'''
lowerCAmelCase = {'''vocab_file''': '''spiece.model'''}
lowerCAmelCase = {
    '''vocab_file''': {'''google/pegasus-xsum''': '''https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'''}
}
lowerCAmelCase = {
    '''google/pegasus-xsum''': 5_1_2,
}
lowerCAmelCase = logging.get_logger(__name__)


class A(A_):
    UpperCamelCase_: Union[str, Any] = VOCAB_FILES_NAMES
    UpperCamelCase_: List[Any] = VOCAB_FILES_NAMES
    UpperCamelCase_: int = PRETRAINED_VOCAB_FILES_MAP
    UpperCamelCase_: Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    UpperCamelCase_: int = ['''input_ids''', '''attention_mask''']

    def __init__(self, lowerCAmelCase, lowerCAmelCase="<pad>", lowerCAmelCase="</s>", lowerCAmelCase="<unk>", lowerCAmelCase="<mask_2>", lowerCAmelCase="<mask_1>", lowerCAmelCase=None, lowerCAmelCase=1_0_3, lowerCAmelCase=None, **lowerCAmelCase):
        __lowercase = offset
        if additional_special_tokens is not None:
            if not isinstance(lowerCAmelCase, lowerCAmelCase):
                raise TypeError(
                    f'additional_special_tokens should be of type {type(lowerCAmelCase)}, but is'
                    f' {type(lowerCAmelCase)}')
            __lowercase = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f'<unk_{i}>' for i in range(len(lowerCAmelCase), self.offset - 1)
            ]
            if len(set(lowerCAmelCase)) != len(lowerCAmelCase):
                raise ValueError(
                    'Please make sure that the provided additional_special_tokens do not contain an incorrectly'
                    f' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.')
            __lowercase = additional_special_tokens_extended
        else:
            __lowercase = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f'<unk_{i}>' for i in range(2, self.offset)]
        __lowercase = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=lowerCAmelCase,
            unk_token=lowerCAmelCase,
            mask_token=lowerCAmelCase,
            pad_token=lowerCAmelCase,
            mask_token_sent=lowerCAmelCase,
            offset=lowerCAmelCase,
            additional_special_tokens=lowerCAmelCase,
            sp_model_kwargs=self.sp_model_kwargs,
            **lowerCAmelCase,
        )
        __lowercase = mask_token_sent
        __lowercase = vocab_file
        __lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(lowerCAmelCase)
        # add special tokens to encoder dict
        __lowercase = {
            0: self.pad_token,
            1: self.eos_token,
        }
        if self.mask_token_sent is not None:
            self.encoder.update({
                2: self.mask_token_sent,
                3: self.mask_token,
            })
        if self.offset > 0:
            # entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
            # mask_token_sent is already added to list -> so start at 1
            self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1, self.offset - 1)})
        __lowercase = {v: k for k, v in self.encoder.items()}

    @property
    def _A(self):
        return len(self.sp_model) + self.offset

    def _A(self):
        __lowercase = {self.convert_ids_to_tokens(lowerCAmelCase): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        __lowercase = self.__dict__.copy()
        __lowercase = None
        return state

    def __setstate__(self, lowerCAmelCase):
        __lowercase = d
        # for backward compatibility
        if not hasattr(self, 'sp_model_kwargs'):
            __lowercase = {}
        __lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _A(self, lowerCAmelCase):
        return self.sp_model.encode(lowerCAmelCase, out_type=lowerCAmelCase)

    def _A(self, lowerCAmelCase):
        if token in self.decoder:
            return self.decoder[token]
        elif token in self.added_tokens_decoder:
            return self.added_tokens_decoder[token]
        __lowercase = self.sp_model.piece_to_id(lowerCAmelCase)
        return sp_id + self.offset

    def _A(self, lowerCAmelCase):
        if index in self.encoder:
            return self.encoder[index]
        elif index in self.added_tokens_encoder:
            return self.added_tokens_encoder[index]
        else:
            __lowercase = self.sp_model.IdToPiece(index - self.offset)
            return token

    def _A(self, lowerCAmelCase):
        __lowercase = []
        __lowercase = ''
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(lowerCAmelCase) + token
                __lowercase = []
            else:
                current_sub_tokens.append(lowerCAmelCase)
        out_string += self.sp_model.decode(lowerCAmelCase)
        return out_string.strip()

    def _A(self, lowerCAmelCase=False):
        return 1

    def _A(self, lowerCAmelCase):
        __lowercase = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special
        return [1 if x in all_special_ids else 0 for x in seq]

    def _A(self, lowerCAmelCase, lowerCAmelCase=None, lowerCAmelCase=False):
        if already_has_special_tokens:
            return self._special_token_mask(lowerCAmelCase)
        elif token_ids_a is None:
            return self._special_token_mask(lowerCAmelCase) + [1]
        else:
            return self._special_token_mask(token_ids_a + token_ids_a) + [1]

    def _A(self, lowerCAmelCase, lowerCAmelCase=None):
        if token_ids_a is None:
            return token_ids_a + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_a + token_ids_a + [self.eos_token_id]

    def _A(self, lowerCAmelCase, lowerCAmelCase=None):
        if not os.path.isdir(lowerCAmelCase):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        __lowercase = os.path.join(
            lowerCAmelCase, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        if os.path.abspath(self.vocab_file) != os.path.abspath(lowerCAmelCase) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, lowerCAmelCase)
        elif not os.path.isfile(self.vocab_file):
            with open(lowerCAmelCase, 'wb') as fi:
                __lowercase = self.sp_model.serialized_model_proto()
                fi.write(lowerCAmelCase)
        return (out_vocab_file,)
```
style_context_codestyle: 295
label: 1
Row 2

code:
```python
from ..utils import DummyObject, requires_backends


class A(metaclass=A_):
    UpperCamelCase_: List[Any] = ['''note_seq''']

    def __init__(self, *lowerCAmelCase, **lowerCAmelCase):
        requires_backends(self, ['note_seq'])

    @classmethod
    def _A(cls, *lowerCAmelCase, **lowerCAmelCase):
        requires_backends(cls, ['note_seq'])

    @classmethod
    def _A(cls, *lowerCAmelCase, **lowerCAmelCase):
        requires_backends(cls, ['note_seq'])
```
code_codestyle: 295

style_context:
```python
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin

if is_torch_available():
    import torch

    from transformers import (
        OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
        OpenAIGPTConfig,
        OpenAIGPTDoubleHeadsModel,
        OpenAIGPTForSequenceClassification,
        OpenAIGPTLMHeadModel,
        OpenAIGPTModel,
    )


class A:
    def __init__(self, lowerCAmelCase, lowerCAmelCase=1_3, lowerCAmelCase=7, lowerCAmelCase=True, lowerCAmelCase=True, lowerCAmelCase=True, lowerCAmelCase=9_9, lowerCAmelCase=3_2, lowerCAmelCase=5, lowerCAmelCase=4, lowerCAmelCase=3_7, lowerCAmelCase="gelu", lowerCAmelCase=0.1, lowerCAmelCase=0.1, lowerCAmelCase=5_1_2, lowerCAmelCase=1_6, lowerCAmelCase=2, lowerCAmelCase=0.02, lowerCAmelCase=3, lowerCAmelCase=4, lowerCAmelCase=None):
        __lowercase = parent
        __lowercase = batch_size
        __lowercase = seq_length
        __lowercase = is_training
        __lowercase = use_token_type_ids
        __lowercase = use_labels
        __lowercase = vocab_size
        __lowercase = hidden_size
        __lowercase = num_hidden_layers
        __lowercase = num_attention_heads
        __lowercase = intermediate_size
        __lowercase = hidden_act
        __lowercase = hidden_dropout_prob
        __lowercase = attention_probs_dropout_prob
        __lowercase = max_position_embeddings
        __lowercase = type_vocab_size
        __lowercase = type_sequence_label_size
        __lowercase = initializer_range
        __lowercase = num_labels
        __lowercase = num_choices
        __lowercase = scope
        __lowercase = self.vocab_size - 1

    def _A(self):
        __lowercase = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        __lowercase = None
        if self.use_token_type_ids:
            __lowercase = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        __lowercase = None
        __lowercase = None
        __lowercase = None
        if self.use_labels:
            __lowercase = ids_tensor([self.batch_size], self.type_sequence_label_size)
            __lowercase = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            __lowercase = ids_tensor([self.batch_size], self.num_choices)
        __lowercase = OpenAIGPTConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            pad_token_id=self.pad_token_id,
        )
        __lowercase = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)
        return (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def _A(self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, *lowerCAmelCase):
        __lowercase = OpenAIGPTModel(config=lowerCAmelCase)
        model.to(lowerCAmelCase)
        model.eval()
        __lowercase = model(lowerCAmelCase, token_type_ids=lowerCAmelCase, head_mask=lowerCAmelCase)
        __lowercase = model(lowerCAmelCase, token_type_ids=lowerCAmelCase)
        __lowercase = model(lowerCAmelCase)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def _A(self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, *lowerCAmelCase):
        __lowercase = OpenAIGPTLMHeadModel(lowerCAmelCase)
        model.to(lowerCAmelCase)
        model.eval()
        __lowercase = model(lowerCAmelCase, token_type_ids=lowerCAmelCase, labels=lowerCAmelCase)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def _A(self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, *lowerCAmelCase):
        __lowercase = OpenAIGPTDoubleHeadsModel(lowerCAmelCase)
        model.to(lowerCAmelCase)
        model.eval()
        __lowercase = model(lowerCAmelCase, token_type_ids=lowerCAmelCase, labels=lowerCAmelCase)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def _A(self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, *lowerCAmelCase):
        __lowercase = self.num_labels
        __lowercase = OpenAIGPTForSequenceClassification(lowerCAmelCase)
        model.to(lowerCAmelCase)
        model.eval()
        __lowercase = ids_tensor([self.batch_size], self.type_sequence_label_size)
        __lowercase = model(lowerCAmelCase, token_type_ids=lowerCAmelCase, labels=lowerCAmelCase)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def _A(self):
        __lowercase = self.prepare_config_and_inputs()
        (
            (__lowercase),
            (__lowercase),
            (__lowercase),
            (__lowercase),
            (__lowercase),
            (__lowercase),
            (__lowercase),
        ) = config_and_inputs
        __lowercase = {
            'input_ids': input_ids,
            'token_type_ids': token_type_ids,
            'head_mask': head_mask,
        }
        return config, inputs_dict


@require_torch
class A(A_, A_, A_, unittest.TestCase):
    UpperCamelCase_: Optional[Any] = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    UpperCamelCase_: Tuple = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    UpperCamelCase_: List[str] = (
        {
            '''feature-extraction''': OpenAIGPTModel,
            '''text-classification''': OpenAIGPTForSequenceClassification,
            '''text-generation''': OpenAIGPTLMHeadModel,
            '''zero-shot''': OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def _A(self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
            # tiny config could not be created.
            return True
        return False

    def _A(self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase=False):
        __lowercase = super()._prepare_for_class(lowerCAmelCase, lowerCAmelCase, return_labels=lowerCAmelCase)
        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                __lowercase = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length),
                    dtype=torch.long,
                    device=lowerCAmelCase,
                )
                __lowercase = inputs_dict['labels']
                __lowercase = inputs_dict['labels']
                __lowercase = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices),
                    dtype=torch.long,
                    device=lowerCAmelCase,
                )
                __lowercase = torch.zeros(self.model_tester.batch_size, dtype=torch.long, device=lowerCAmelCase)
        return inputs_dict

    def _A(self):
        __lowercase = OpenAIGPTModelTester(self)
        __lowercase = ConfigTester(self, config_class=lowerCAmelCase, n_embd=3_7)

    def _A(self):
        self.config_tester.run_common_tests()

    def _A(self):
        __lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*lowerCAmelCase)

    def _A(self):
        __lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*lowerCAmelCase)

    def _A(self):
        __lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*lowerCAmelCase)

    def _A(self):
        __lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*lowerCAmelCase)

    @slow
    def _A(self):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __lowercase = OpenAIGPTModel.from_pretrained(lowerCAmelCase)
            self.assertIsNotNone(lowerCAmelCase)


@require_torch
class A(unittest.TestCase):
    @slow
    def _A(self):
        __lowercase = OpenAIGPTLMHeadModel.from_pretrained('openai-gpt')
        model.to(lowerCAmelCase)
        __lowercase = torch.tensor([[4_8_1, 4_7_3_5, 5_4_4]], dtype=torch.long, device=lowerCAmelCase)  # the president is
        __lowercase = [
            4_8_1, 4_7_3_5, 5_4_4, 2_4_6, 9_6_3, 8_7_0, 7_6_2, 2_3_9, 2_4_4, 4_0_4_7_7,
            2_4_4, 2_4_9, 7_1_9, 8_8_1, 4_8_7, 5_4_4, 2_4_0, 2_4_4, 6_0_3, 4_8_1,
        ]  # the president is a very good man. " \n " i'm sure he is, " said the
        __lowercase = model.generate(lowerCAmelCase, do_sample=lowerCAmelCase)
        self.assertListEqual(output_ids[0].tolist(), lowerCAmelCase)
```
style_context_codestyle: 295
label: 1
Row 3

code:
```python
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool


class A(A_):
    UpperCamelCase_: Optional[int] = '''openai/whisper-base'''
    UpperCamelCase_: Optional[Any] = (
        '''This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the '''
        '''transcribed text.'''
    )
    UpperCamelCase_: Optional[int] = '''transcriber'''
    UpperCamelCase_: str = WhisperProcessor
    UpperCamelCase_: Tuple = WhisperForConditionalGeneration
    UpperCamelCase_: Optional[Any] = ['''audio''']
    UpperCamelCase_: List[Any] = ['''text''']

    def _A(self, lowerCAmelCase):
        return self.pre_processor(lowerCAmelCase, return_tensors='pt').input_features

    def _A(self, lowerCAmelCase):
        return self.model.generate(inputs=lowerCAmelCase)

    def _A(self, lowerCAmelCase):
        return self.pre_processor.batch_decode(lowerCAmelCase, skip_special_tokens=lowerCAmelCase)[0]
```
code_codestyle: 295

style_context:
```python
from math import isqrt


def _lowerCamelCase(lowercase__) -> bool:
    '''simple docstring'''
    return all(number % divisor != 0 for divisor in range(2, isqrt(lowercase__) + 1))


def _lowerCamelCase(lowercase__=1_0**6) -> int:
    '''simple docstring'''
    __lowercase = 0
    __lowercase = 1
    __lowercase = 7
    while prime_candidate < max_prime:
        primes_count += is_prime(lowercase__)
        cube_index += 1
        prime_candidate += 6 * cube_index
    return primes_count


if __name__ == "__main__":
    print(F'{solution() = }')
```
style_context_codestyle: 295
label: 1
Row 4

code:
```python
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging

if is_sentencepiece_available():
    from .tokenization_albert import AlbertTokenizer
else:
    lowerCAmelCase = None

lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
lowerCAmelCase = {
    '''vocab_file''': {
        '''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''',
        '''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/spiece.model''',
        '''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model''',
        '''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model''',
        '''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/spiece.model''',
        '''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/spiece.model''',
        '''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model''',
        '''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model''',
    },
    '''tokenizer_file''': {
        '''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json''',
        '''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json''',
        '''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json''',
        '''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json''',
        '''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json''',
        '''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json''',
        '''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json''',
        '''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json''',
    },
}
lowerCAmelCase = {
    '''albert-base-v1''': 5_1_2,
    '''albert-large-v1''': 5_1_2,
    '''albert-xlarge-v1''': 5_1_2,
    '''albert-xxlarge-v1''': 5_1_2,
    '''albert-base-v2''': 5_1_2,
    '''albert-large-v2''': 5_1_2,
    '''albert-xlarge-v2''': 5_1_2,
    '''albert-xxlarge-v2''': 5_1_2,
}
lowerCAmelCase = '''▁'''


class A(A_):
    UpperCamelCase_: Any = VOCAB_FILES_NAMES
    UpperCamelCase_: List[str] = PRETRAINED_VOCAB_FILES_MAP
    UpperCamelCase_: List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    UpperCamelCase_: Dict = AlbertTokenizer

    def __init__(self, lowerCAmelCase=None, lowerCAmelCase=None, lowerCAmelCase=True, lowerCAmelCase=True, lowerCAmelCase=False, lowerCAmelCase="[CLS]", lowerCAmelCase="[SEP]", lowerCAmelCase="<unk>", lowerCAmelCase="[SEP]", lowerCAmelCase="<pad>", lowerCAmelCase="[CLS]", lowerCAmelCase="[MASK]", **lowerCAmelCase):
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        __lowercase = (
            AddedToken(lowerCAmelCase, lstrip=lowerCAmelCase, rstrip=lowerCAmelCase, normalized=lowerCAmelCase)
            if isinstance(lowerCAmelCase, lowerCAmelCase)
            else mask_token
        )
        super().__init__(
            lowerCAmelCase,
            tokenizer_file=lowerCAmelCase,
            do_lower_case=lowerCAmelCase,
            remove_space=lowerCAmelCase,
            keep_accents=lowerCAmelCase,
            bos_token=lowerCAmelCase,
            eos_token=lowerCAmelCase,
            unk_token=lowerCAmelCase,
            sep_token=lowerCAmelCase,
            pad_token=lowerCAmelCase,
            cls_token=lowerCAmelCase,
            mask_token=lowerCAmelCase,
            **lowerCAmelCase,
        )
        __lowercase = do_lower_case
        __lowercase = remove_space
        __lowercase = keep_accents
        __lowercase = vocab_file
        __lowercase = False if not self.vocab_file else True

    def _A(self, lowerCAmelCase, lowerCAmelCase=None):
        __lowercase = [self.sep_token_id]
        __lowercase = [self.cls_token_id]
        if token_ids_a is None:
            return cls + token_ids_a + sep
        return cls + token_ids_a + sep + token_ids_a + sep

    def _A(self, lowerCAmelCase, lowerCAmelCase=None):
        __lowercase = [self.sep_token_id]
        __lowercase = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1]

    def _A(self, lowerCAmelCase, lowerCAmelCase=None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.')
        if not os.path.isdir(lowerCAmelCase):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        __lowercase = os.path.join(
            lowerCAmelCase, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        if os.path.abspath(self.vocab_file) != os.path.abspath(lowerCAmelCase):
            copyfile(self.vocab_file, lowerCAmelCase)
        return (out_vocab_file,)
```
code_codestyle: 295

style_context:
```python
from __future__ import annotations


def _lowerCamelCase(lowercase__) -> list[int]:
    '''simple docstring'''
    __lowercase = 2
    __lowercase = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(lowercase__)
    if n > 1:
        factors.append(lowercase__)
    return factors


if __name__ == "__main__":
    import doctest

    doctest.testmod()
```
style_context_codestyle: 295
label: 1
Row 5

code:
```python
def _lowerCamelCase(lowercase__) -> float:
    '''simple docstring'''
    if edge <= 0 or not isinstance(lowercase__, lowercase__):
        raise ValueError('Length must be a positive.')
    return 3 * ((2_5 + 1_0 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)


def _lowerCamelCase(lowercase__) -> float:
    '''simple docstring'''
    if edge <= 0 or not isinstance(lowercase__, lowercase__):
        raise ValueError('Length must be a positive.')
    return ((1_5 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
```
code_codestyle: 295

style_context:
```python
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging

if is_sentencepiece_available():
    from .tokenization_ta import TaTokenizer
else:
    lowerCAmelCase = None

lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
lowerCAmelCase = {
    '''vocab_file''': {
        '''t5-small''': '''https://huggingface.co/t5-small/resolve/main/spiece.model''',
        '''t5-base''': '''https://huggingface.co/t5-base/resolve/main/spiece.model''',
        '''t5-large''': '''https://huggingface.co/t5-large/resolve/main/spiece.model''',
        '''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/spiece.model''',
        '''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/spiece.model''',
    },
    '''tokenizer_file''': {
        '''t5-small''': '''https://huggingface.co/t5-small/resolve/main/tokenizer.json''',
        '''t5-base''': '''https://huggingface.co/t5-base/resolve/main/tokenizer.json''',
        '''t5-large''': '''https://huggingface.co/t5-large/resolve/main/tokenizer.json''',
        '''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/tokenizer.json''',
        '''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/tokenizer.json''',
    },
}

# TODO(PVP) - this should be removed in Transformers v5
lowerCAmelCase = {
    '''t5-small''': 5_1_2,
    '''t5-base''': 5_1_2,
    '''t5-large''': 5_1_2,
    '''t5-3b''': 5_1_2,
    '''t5-11b''': 5_1_2,
}


class A(A_):
    UpperCamelCase_: Dict = VOCAB_FILES_NAMES
    UpperCamelCase_: Dict = PRETRAINED_VOCAB_FILES_MAP
    UpperCamelCase_: List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    UpperCamelCase_: str = ['''input_ids''', '''attention_mask''']
    UpperCamelCase_: List[str] = TaTokenizer
    UpperCamelCase_: List[int] = []

    def __init__(self, lowerCAmelCase=None, lowerCAmelCase=None, lowerCAmelCase="</s>", lowerCAmelCase="<unk>", lowerCAmelCase="<pad>", lowerCAmelCase=1_0_0, lowerCAmelCase=None, **lowerCAmelCase):
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            __lowercase = [f'<extra_id_{i}>' for i in range(lowerCAmelCase)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra special tokens
            __lowercase = len(set(filter(lambda lowerCAmelCase: bool('extra_id_' in str(lowerCAmelCase)), lowerCAmelCase)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f'Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'
                    ' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'
                    ' tokens')
        super().__init__(
            lowerCAmelCase,
            tokenizer_file=lowerCAmelCase,
            eos_token=lowerCAmelCase,
            unk_token=lowerCAmelCase,
            pad_token=lowerCAmelCase,
            extra_ids=lowerCAmelCase,
            additional_special_tokens=lowerCAmelCase,
            **lowerCAmelCase,
        )
        __lowercase = vocab_file
        __lowercase = False if not self.vocab_file else True
        __lowercase = extra_ids

    @staticmethod
    def _A(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase):
        if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
            __lowercase = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    'This tokenizer was incorrectly instantiated with a model max length of'
                    f' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'
                    ' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'
                    ' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'
                    f' {pretrained_model_name_or_path} automatically truncating your input to'
                    f' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'
                    f' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'
                    ' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'
                    ' instantiate this tokenizer with `model_max_length` set to your preferred value.',
                    lowerCAmelCase,
                )
        return max_model_length

    def _A(self, lowerCAmelCase, lowerCAmelCase=None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.')
        if not os.path.isdir(lowerCAmelCase):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        __lowercase = os.path.join(
            lowerCAmelCase, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        if os.path.abspath(self.vocab_file) != os.path.abspath(lowerCAmelCase):
            copyfile(self.vocab_file, lowerCAmelCase)
            logger.info(f'Copy vocab file to {out_vocab_file}')
        return (out_vocab_file,)

    def _A(self, lowerCAmelCase, lowerCAmelCase=None):
        __lowercase = token_ids_a + [self.eos_token_id]
        if token_ids_a is None:
            return self.prefix_tokens + token_ids_a
        else:
            __lowercase = token_ids_a + [self.eos_token_id]
            return self.prefix_tokens + token_ids_a + token_ids_a

    def _A(self, lowerCAmelCase, lowerCAmelCase=None):
        __lowercase = [self.eos_token_id]
        if token_ids_a is None:
            return len(token_ids_a + eos) * [0]
        return len(token_ids_a + eos + token_ids_a + eos) * [0]

    def _A(self):
        return list(
            set(filter(lambda lowerCAmelCase: bool(re.search(r'<extra_id_\d+>', lowerCAmelCase)) is not None, self.additional_special_tokens)))

    def _A(self):
        return [self.convert_tokens_to_ids(lowerCAmelCase) for token in self.get_sentinel_tokens()]
```
style_context_codestyle: 295
label: 1
Row 6

code:
```python
def _lowerCamelCase(lowercase__, lowercase__) -> int:
    '''simple docstring'''
    while second != 0:
        __lowercase = first & second
        first ^= second
        __lowercase = c << 1
    return first


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    lowerCAmelCase = int(input('''Enter the first number: ''').strip())
    lowerCAmelCase = int(input('''Enter the second number: ''').strip())
    print(F'{add(first, second) = }')
```
code_codestyle: 295

style_context:
```python
from collections.abc import Sequence


def _lowerCamelCase(lowercase__, lowercase__=False) -> float:
    '''simple docstring'''
    if not arr:
        return 0
    __lowercase = 0 if allow_empty_subarrays else float('-inf')
    __lowercase = 0.0
    for num in arr:
        __lowercase = max(0 if allow_empty_subarrays else num, curr_sum + num)
        __lowercase = max(lowercase__, lowercase__)
    return max_sum


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    lowerCAmelCase = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
    print(F'{max_subarray_sum(nums) = }')
```
style_context_codestyle: 295
label: 1
Row 7

code:
```python
def _lowerCamelCase(lowercase__) -> list[int]:
    '''simple docstring'''
    __lowercase = [0 for i in range(len(lowercase__))]
    # initialize interval's left pointer and right pointer
    __lowercase, __lowercase = 0, 0
    for i in range(1, len(lowercase__)):
        # case when current index is inside the interval
        if i <= right_pointer:
            __lowercase = min(right_pointer - i + 1, z_result[i - left_pointer])
            __lowercase = min_edge
        while go_next(lowercase__, lowercase__, lowercase__):
            z_result[i] += 1
        # if new index's result gives us more right interval,
        # we've to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            __lowercase, __lowercase = i, i + z_result[i] - 1
    return z_result


def _lowerCamelCase(lowercase__, lowercase__, lowercase__) -> bool:
    '''simple docstring'''
    return i + z_result[i] < len(lowercase__) and s[z_result[i]] == s[i + z_result[i]]


def _lowerCamelCase(lowercase__, lowercase__) -> int:
    '''simple docstring'''
    __lowercase = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # with concatenated string
    __lowercase = z_function(pattern + input_str)
    for val in z_result:
        # if value is greater then length of the pattern string
        # that means this index is starting position of substring
        # which is equal to pattern string
        if val >= len(lowercase__):
            answer += 1
    return answer


if __name__ == "__main__":
    import doctest

    doctest.testmod()
```
code_codestyle: 295

style_context:
```python
import gc
import inspect
import unittest

import torch
from parameterized import parameterized

from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism

from .test_modeling_common import ModelTesterMixin

enable_full_determinism()


class A(A_, unittest.TestCase):
    UpperCamelCase_: Any = PriorTransformer
    UpperCamelCase_: List[str] = '''hidden_states'''

    @property
    def _A(self):
        __lowercase = 4
        __lowercase = 8
        __lowercase = 7
        __lowercase = floats_tensor((batch_size, embedding_dim)).to(lowerCAmelCase)
        __lowercase = floats_tensor((batch_size, embedding_dim)).to(lowerCAmelCase)
        __lowercase = floats_tensor((batch_size, num_embeddings, embedding_dim)).to(lowerCAmelCase)
        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    def _A(self, lowerCAmelCase=0):
        torch.manual_seed(lowerCAmelCase)
        __lowercase = 4
        __lowercase = 8
        __lowercase = 7
        __lowercase = torch.randn((batch_size, embedding_dim)).to(lowerCAmelCase)
        __lowercase = torch.randn((batch_size, embedding_dim)).to(lowerCAmelCase)
        __lowercase = torch.randn((batch_size, num_embeddings, embedding_dim)).to(lowerCAmelCase)
        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    @property
    def _A(self):
        return (4, 8)

    @property
    def _A(self):
        return (4, 8)

    def _A(self):
        __lowercase = {
            'num_attention_heads': 2,
            'attention_head_dim': 4,
            'num_layers': 2,
            'embedding_dim': 8,
            'num_embeddings': 7,
            'additional_embeddings': 4,
        }
        __lowercase = self.dummy_input
        return init_dict, inputs_dict

    def _A(self):
        __lowercase, __lowercase = PriorTransformer.from_pretrained(
            'hf-internal-testing/prior-dummy', output_loading_info=lowerCAmelCase)
        self.assertIsNotNone(lowerCAmelCase)
        self.assertEqual(len(loading_info['missing_keys']), 0)
        model.to(lowerCAmelCase)
        __lowercase = model(**self.dummy_input)[0]
        assert hidden_states is not None, "Make sure output is not None"

    def _A(self):
        __lowercase, __lowercase = self.prepare_init_args_and_inputs_for_common()
        __lowercase = self.model_class(**lowerCAmelCase)
        __lowercase = inspect.signature(model.forward)
        # signature.parameters is an OrderedDict => so arg_names order is deterministic
        __lowercase = [*signature.parameters.keys()]
        __lowercase = ['hidden_states', 'timestep']
        self.assertListEqual(arg_names[:2], lowerCAmelCase)

    def _A(self):
        __lowercase = PriorTransformer.from_pretrained('hf-internal-testing/prior-dummy')
        __lowercase = model.to(lowerCAmelCase)
        if hasattr(lowerCAmelCase, 'set_default_attn_processor'):
            model.set_default_attn_processor()
        __lowercase = self.get_dummy_seed_input()
        with torch.no_grad():
            __lowercase = model(**lowerCAmelCase)[0]
        __lowercase = output[0, :5].flatten().cpu()
        print(lowerCAmelCase)
        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        __lowercase = torch.tensor([-1.34_36, -0.28_70, 0.75_38, 0.43_68, -0.02_39])
        self.assertTrue(torch_all_close(lowerCAmelCase, lowerCAmelCase, rtol=1E-2))


@slow
class A(unittest.TestCase):
    def _A(self, lowerCAmelCase=1, lowerCAmelCase=7_6_8, lowerCAmelCase=7_7, lowerCAmelCase=0):
        torch.manual_seed(lowerCAmelCase)
        __lowercase = batch_size
        __lowercase = embedding_dim
        __lowercase = num_embeddings
        __lowercase = torch.randn((batch_size, embedding_dim)).to(lowerCAmelCase)
        __lowercase = torch.randn((batch_size, embedding_dim)).to(lowerCAmelCase)
        __lowercase = torch.randn((batch_size, num_embeddings, embedding_dim)).to(lowerCAmelCase)
        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    def _A(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @parameterized.expand(
        [
            # fmt: off
            [1_3, [-0.58_61, 0.12_83, -0.09_31, 0.08_82, 0.44_76, 0.13_29, -0.04_98, 0.06_40]],
            [3_7, [-0.49_13, 0.01_10, -0.04_83, 0.05_41, 0.49_54, -0.01_70, 0.03_54, 0.16_51]],
            # fmt: on
        ]
    )
    def _A(self, lowerCAmelCase, lowerCAmelCase):
        __lowercase = PriorTransformer.from_pretrained('kandinsky-community/kandinsky-2-1-prior', subfolder='prior')
        model.to(lowerCAmelCase)
        __lowercase = self.get_dummy_seed_input(seed=lowerCAmelCase)
        with torch.no_grad():
            __lowercase = model(**lowerCAmelCase)[0]
        assert list(sample.shape) == [1, 7_6_8]
        __lowercase = sample[0, :8].flatten().cpu()
        print(lowerCAmelCase)
        __lowercase = torch.tensor(lowerCAmelCase)
        assert torch_all_close(lowerCAmelCase, lowerCAmelCase, atol=1E-3)
```
style_context_codestyle: 295
label: 1
Row 8

code:
```python
from typing import Optional

import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...activations import ACTaFN
from ...modeling_outputs import (
    BackboneOutput,
    BaseModelOutputWithNoAttention,
    BaseModelOutputWithPoolingAndNoAttention,
    ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
    add_code_sample_docstrings,
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    logging,
    replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig

lowerCAmelCase = logging.get_logger(__name__)

# General docstring
lowerCAmelCase = '''ResNetConfig'''

# Base docstring
lowerCAmelCase = '''microsoft/resnet-50'''
lowerCAmelCase = [1, 2_0_4_8, 7, 7]

# Image classification docstring
lowerCAmelCase = '''microsoft/resnet-50'''
lowerCAmelCase = '''tiger cat'''

lowerCAmelCase = [
    '''microsoft/resnet-50''',
    # See all resnet models at https://huggingface.co/models?filter=resnet
]


class A(nn.Module):
    def __init__(self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase=3, lowerCAmelCase=1, lowerCAmelCase="relu"):
        super().__init__()
        __lowercase = nn.Convad(
            lowerCAmelCase, lowerCAmelCase, kernel_size=lowerCAmelCase, stride=lowerCAmelCase, padding=kernel_size // 2, bias=lowerCAmelCase)
        __lowercase = nn.BatchNormad(lowerCAmelCase)
        __lowercase = ACTaFN[activation] if activation is not None else nn.Identity()

    def _A(self, lowerCAmelCase):
        __lowercase = self.convolution(lowerCAmelCase)
        __lowercase = self.normalization(lowerCAmelCase)
        __lowercase = self.activation(lowerCAmelCase)
        return hidden_state


class A(nn.Module):
    def __init__(self, lowerCAmelCase):
        super().__init__()
        __lowercase = ResNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=7, stride=2, activation=config.hidden_act)
        __lowercase = nn.MaxPoolad(kernel_size=3, stride=2, padding=1)
        __lowercase = config.num_channels

    def _A(self, lowerCAmelCase):
        __lowercase = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                'Make sure that the channel dimension of the pixel values match with the one set in the configuration.')
        __lowercase = self.embedder(lowerCAmelCase)
        __lowercase = self.pooler(lowerCAmelCase)
        return embedding


class A(nn.Module):
    def __init__(self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase=2):
        super().__init__()
        __lowercase = nn.Convad(lowerCAmelCase, lowerCAmelCase, kernel_size=1, stride=lowerCAmelCase, bias=lowerCAmelCase)
        __lowercase = nn.BatchNormad(lowerCAmelCase)

    def _A(self, lowerCAmelCase):
        __lowercase = self.convolution(lowerCAmelCase)
        __lowercase = self.normalization(lowerCAmelCase)
        return hidden_state


class A(nn.Module):
    def __init__(self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase=1, lowerCAmelCase="relu"):
        super().__init__()
        __lowercase = in_channels != out_channels or stride != 1
        __lowercase = (
            ResNetShortCut(lowerCAmelCase, lowerCAmelCase, stride=lowerCAmelCase) if should_apply_shortcut else nn.Identity()
        )
        __lowercase = nn.Sequential(
            ResNetConvLayer(lowerCAmelCase, lowerCAmelCase, stride=lowerCAmelCase),
            ResNetConvLayer(lowerCAmelCase, lowerCAmelCase, activation=lowerCAmelCase),
        )
        __lowercase = ACTaFN[activation]

    def _A(self, lowerCAmelCase):
        __lowercase = hidden_state
        __lowercase = self.layer(lowerCAmelCase)
        __lowercase = self.shortcut(lowerCAmelCase)
        hidden_state += residual
        __lowercase = self.activation(lowerCAmelCase)
        return hidden_state


class A(nn.Module):
    def __init__(self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase=1, lowerCAmelCase="relu", lowerCAmelCase=4):
        super().__init__()
        __lowercase = in_channels != out_channels or stride != 1
        __lowercase = out_channels // reduction
        __lowercase = (
            ResNetShortCut(lowerCAmelCase, lowerCAmelCase, stride=lowerCAmelCase) if should_apply_shortcut else nn.Identity()
        )
        __lowercase = nn.Sequential(
            ResNetConvLayer(lowerCAmelCase, lowerCAmelCase, kernel_size=1),
            ResNetConvLayer(lowerCAmelCase, lowerCAmelCase, stride=lowerCAmelCase),
            ResNetConvLayer(lowerCAmelCase, lowerCAmelCase, kernel_size=1, activation=lowerCAmelCase),
        )
        __lowercase = ACTaFN[activation]

    def _A(self, lowerCAmelCase):
        __lowercase = hidden_state
        __lowercase = self.layer(lowerCAmelCase)
        __lowercase = self.shortcut(lowerCAmelCase)
        hidden_state += residual
        __lowercase = self.activation(lowerCAmelCase)
        return hidden_state


class A(nn.Module):
    def __init__(self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase=2, lowerCAmelCase=2):
        super().__init__()
        __lowercase = ResNetBottleNeckLayer if config.layer_type == 'bottleneck' else ResNetBasicLayer
        __lowercase = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(lowerCAmelCase, lowerCAmelCase, stride=lowerCAmelCase, activation=config.hidden_act),
            *[layer(lowerCAmelCase, lowerCAmelCase, activation=config.hidden_act) for _ in range(depth - 1)],
        )

    def _A(self, lowerCAmelCase):
        __lowercase = input
        for layer in self.layers:
            __lowercase = layer(lowerCAmelCase)
        return hidden_state


class A(nn.Module):
    def __init__(self, lowerCAmelCase):
        super().__init__()
        __lowercase = nn.ModuleList([])
        # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
        self.stages.append(
            ResNetStage(
                lowerCAmelCase,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        __lowercase = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(lowerCAmelCase, config.depths[1:]):
            self.stages.append(ResNetStage(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, depth=lowerCAmelCase))

    def _A(self, lowerCAmelCase, lowerCAmelCase=False, lowerCAmelCase=True):
        __lowercase = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                __lowercase = hidden_states + (hidden_state,)
            __lowercase = stage_module(lowerCAmelCase)
        if output_hidden_states:
            __lowercase = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)
        return BaseModelOutputWithNoAttention(
            last_hidden_state=lowerCAmelCase,
            hidden_states=lowerCAmelCase,
        )


class A(A_):
    UpperCamelCase_: Any = ResNetConfig
    UpperCamelCase_: Optional[int] = '''resnet'''
    UpperCamelCase_: Optional[Any] = '''pixel_values'''
    UpperCamelCase_: Tuple = True

    def _A(self, lowerCAmelCase):
        if isinstance(lowerCAmelCase, nn.Convad):
            nn.init.kaiming_normal_(module.weight, mode='fan_out', nonlinearity='relu')
        elif isinstance(lowerCAmelCase, (nn.BatchNormad, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _A(self, lowerCAmelCase, lowerCAmelCase=False):
        if isinstance(lowerCAmelCase, lowerCAmelCase):
            __lowercase = value


lowerCAmelCase = R'''
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`ResNetConfig`]): Model configuration class with all the parameters of the model. Initializing with a
            config file does not load the weights associated with the model, only the configuration. Check out the
            [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''

lowerCAmelCase = R'''
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''


@add_start_docstrings(
    '''The bare ResNet model outputting raw features without any specific head on top.''',
    A_,
)
class A(A_):
    def __init__(self, lowerCAmelCase):
        super().__init__(lowerCAmelCase)
        __lowercase = config
        __lowercase = ResNetEmbeddings(lowerCAmelCase)
        __lowercase = ResNetEncoder(lowerCAmelCase)
        __lowercase = nn.AdaptiveAvgPoolad((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(lowerCAmelCase)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=lowerCAmelCase,
        config_class=_CONFIG_FOR_DOC,
        modality='vision',
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def _A(self, lowerCAmelCase, lowerCAmelCase=None, lowerCAmelCase=None):
        __lowercase = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        __lowercase = return_dict if return_dict is not None else self.config.use_return_dict
        __lowercase = self.embedder(lowerCAmelCase)
        __lowercase = self.encoder(
            lowerCAmelCase, output_hidden_states=lowerCAmelCase, return_dict=lowerCAmelCase)
        __lowercase = encoder_outputs[0]
        __lowercase = self.pooler(lowerCAmelCase)
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=lowerCAmelCase,
            pooler_output=lowerCAmelCase,
            hidden_states=encoder_outputs.hidden_states,
        )


@add_start_docstrings(
    '''
    ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    ''',
    A_,
)
class A(A_):
    def __init__(self, lowerCAmelCase):
        super().__init__(lowerCAmelCase)
        __lowercase = config.num_labels
        __lowercase = ResNetModel(lowerCAmelCase)
        # classification head
        __lowercase = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(lowerCAmelCase)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=lowerCAmelCase,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def _A(self, lowerCAmelCase=None, lowerCAmelCase=None, lowerCAmelCase=None, lowerCAmelCase=None):
        __lowercase = return_dict if return_dict is not None else self.config.use_return_dict
        __lowercase = self.resnet(lowerCAmelCase, output_hidden_states=lowerCAmelCase, return_dict=lowerCAmelCase)
        __lowercase = outputs.pooler_output if return_dict else outputs[1]
        __lowercase = self.classifier(lowerCAmelCase)
        __lowercase = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    __lowercase = 'regression'
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    __lowercase = 'single_label_classification'
                else:
                    __lowercase = 'multi_label_classification'
            if self.config.problem_type == "regression":
                __lowercase = MSELoss()
                if self.num_labels == 1:
                    __lowercase = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    __lowercase = loss_fct(lowerCAmelCase, lowerCAmelCase)
            elif self.config.problem_type == "single_label_classification":
                __lowercase = CrossEntropyLoss()
                __lowercase = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                __lowercase = BCEWithLogitsLoss()
                __lowercase = loss_fct(lowerCAmelCase, lowerCAmelCase)
        if not return_dict:
            __lowercase = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output
        return ImageClassifierOutputWithNoAttention(loss=lowerCAmelCase, logits=lowerCAmelCase, hidden_states=outputs.hidden_states)


@add_start_docstrings(
    '''
    ResNet backbone, to be used with frameworks like DETR and MaskFormer.
    ''',
    A_,
)
class A(A_, A_):
    def __init__(self, lowerCAmelCase):
        super().__init__(lowerCAmelCase)
        super()._init_backbone(lowerCAmelCase)
        __lowercase = [config.embedding_size] + config.hidden_sizes
        __lowercase = ResNetEmbeddings(lowerCAmelCase)
        __lowercase = ResNetEncoder(lowerCAmelCase)
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(lowerCAmelCase)
    @replace_return_docstrings(output_type=lowerCAmelCase, config_class=_CONFIG_FOR_DOC)
    def _A(self, lowerCAmelCase, lowerCAmelCase=None, lowerCAmelCase=None):
        __lowercase = return_dict if return_dict is not None else self.config.use_return_dict
        __lowercase = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        __lowercase = self.embedder(lowerCAmelCase)
        __lowercase = self.encoder(lowerCAmelCase, output_hidden_states=lowerCAmelCase, return_dict=lowerCAmelCase)
        __lowercase = outputs.hidden_states
        __lowercase = ()
        for idx, stage in enumerate(self.stage_names):
            if stage in self.out_features:
                feature_maps += (hidden_states[idx],)
        if not return_dict:
            __lowercase = (feature_maps,)
            if output_hidden_states:
                output += (outputs.hidden_states,)
            return output
        return BackboneOutput(
            feature_maps=lowerCAmelCase,
            hidden_states=outputs.hidden_states if output_hidden_states else None,
            attentions=lowerCAmelCase,
        )
```
code_codestyle: 295

style_context:
```python
def _lowerCamelCase(lowercase__) -> int:
    '''simple docstring'''
    if collection == []:
        return []
    # get some information about the collection
    __lowercase = len(lowercase__)
    __lowercase = max(lowercase__)
    __lowercase = min(lowercase__)
    # create the counting array
    __lowercase = coll_max + 1 - coll_min
    __lowercase = [0] * counting_arr_length
    # count how much a number appears in the collection
    for number in collection:
        counting_arr[number - coll_min] += 1
    # sum each position with it's predecessors. now, counting_arr[i] tells
    # us how many elements <= i has in the collection
    for i in range(1, lowercase__):
        __lowercase = counting_arr[i] + counting_arr[i - 1]
    # create the output collection
    __lowercase = [0] * coll_len
    # place the elements in the output, respecting the original order (stable
    # sort) from end to begin, updating counting_arr
    for i in reversed(range(0, lowercase__)):
        __lowercase = collection[i]
        counting_arr[collection[i] - coll_min] -= 1
    return ordered


def _lowerCamelCase(lowercase__) -> List[str]:
    '''simple docstring'''
    return "".join([chr(lowercase__) for i in counting_sort([ord(lowercase__) for c in string])])


if __name__ == "__main__":
    # Test string sort
    assert counting_sort_string('''thisisthestring''') == "eghhiiinrsssttt"
    lowerCAmelCase = input('''Enter numbers separated by a comma:\n''').strip()
    lowerCAmelCase = [int(item) for item in user_input.split(''',''')]
    print(counting_sort(unsorted))
```
style_context_codestyle: 295
label: 1
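An aside before the last row: every row shown here has code_codestyle = style_context_codestyle = 295 and label = 1, which is consistent with `label` marking whether the two snippets share the same code style. A minimal sketch of turning a row into a classification example; the helper name and the toy row are illustrative, not from the source:

```python
# Hypothetical helper: pair the two text fields with the binary label.
# Assumes (not confirmed by the source) that label == 1 means `code` and
# `style_context` share a codestyle id.
def to_example(row: dict) -> tuple[str, str, int]:
    return (row["code"], row["style_context"], int(row["label"]))

toy_row = {"code": "def f(): pass", "style_context": "def g(): pass", "label": 1}
print(to_example(toy_row))  # ('def f(): pass', 'def g(): pass', 1)
```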
Row 9

code:
```python
import argparse
import importlib
from pathlib import Path

# Test all the extensions added in the setup
lowerCAmelCase = [
    '''kernels/rwkv/wkv_cuda.cu''',
    '''kernels/rwkv/wkv_op.cpp''',
    '''kernels/deformable_detr/ms_deform_attn.h''',
    '''kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh''',
    '''models/graphormer/algos_graphormer.pyx''',
]


def _lowerCamelCase(lowercase__) -> str:
    '''simple docstring'''
    for file in FILES_TO_FIND:
        if not (transformers_path / file).exists():
            return False
    return True


if __name__ == "__main__":
    lowerCAmelCase = argparse.ArgumentParser()
    parser.add_argument('''--check_lib''', action='''store_true''', help='''Whether to check the build or the actual package.''')
    lowerCAmelCase = parser.parse_args()
    if args.check_lib:
        lowerCAmelCase = importlib.import_module('''transformers''')
        lowerCAmelCase = Path(transformers_module.__file__).parent
    else:
        lowerCAmelCase = Path.cwd() / '''build/lib/transformers'''
    if not test_custom_files_are_present(transformers_path):
        raise ValueError('''The built release does not contain the custom files. Fix this before going further!''')
```
code_codestyle: 295

style_context:
```python
import os
import tempfile
import unittest

from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin

if is_torch_available():
    import torch

    from transformers import (
        DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
        DistilBertForMaskedLM,
        DistilBertForMultipleChoice,
        DistilBertForQuestionAnswering,
        DistilBertForSequenceClassification,
        DistilBertForTokenClassification,
        DistilBertModel,
    )


class A(A_):
    def __init__(self, lowerCAmelCase, lowerCAmelCase=1_3, lowerCAmelCase=7, lowerCAmelCase=True, lowerCAmelCase=True, lowerCAmelCase=False, lowerCAmelCase=True, lowerCAmelCase=9_9, lowerCAmelCase=3_2, lowerCAmelCase=5, lowerCAmelCase=4, lowerCAmelCase=3_7, lowerCAmelCase="gelu", lowerCAmelCase=0.1, lowerCAmelCase=0.1, lowerCAmelCase=5_1_2, lowerCAmelCase=1_6, lowerCAmelCase=2, lowerCAmelCase=0.02, lowerCAmelCase=3, lowerCAmelCase=4, lowerCAmelCase=None):
        __lowercase = parent
        __lowercase = batch_size
        __lowercase = seq_length
        __lowercase = is_training
        __lowercase = use_input_mask
        __lowercase = use_token_type_ids
        __lowercase = use_labels
        __lowercase = vocab_size
        __lowercase = hidden_size
        __lowercase = num_hidden_layers
        __lowercase = num_attention_heads
        __lowercase = intermediate_size
        __lowercase = hidden_act
        __lowercase = hidden_dropout_prob
        __lowercase = attention_probs_dropout_prob
        __lowercase = max_position_embeddings
        __lowercase = type_vocab_size
        __lowercase = type_sequence_label_size
        __lowercase = initializer_range
        __lowercase = num_labels
        __lowercase = num_choices
        __lowercase = scope

    def _A(self):
        __lowercase = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        __lowercase = None
        if self.use_input_mask:
            __lowercase = random_attention_mask([self.batch_size, self.seq_length])
        __lowercase = None
        __lowercase = None
        __lowercase = None
        if self.use_labels:
            __lowercase = ids_tensor([self.batch_size], self.type_sequence_label_size)
            __lowercase = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            __lowercase = ids_tensor([self.batch_size], self.num_choices)
        __lowercase = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def _A(self):
        return DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )

    def _A(self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase):
        __lowercase = DistilBertModel(config=lowerCAmelCase)
        model.to(lowerCAmelCase)
        model.eval()
        __lowercase = model(lowerCAmelCase, lowerCAmelCase)
        __lowercase = model(lowerCAmelCase)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def _A(self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase):
        __lowercase = DistilBertForMaskedLM(config=lowerCAmelCase)
        model.to(lowerCAmelCase)
        model.eval()
        __lowercase = model(lowerCAmelCase, attention_mask=lowerCAmelCase, labels=lowerCAmelCase)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def _A(self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase):
        __lowercase = DistilBertForQuestionAnswering(config=lowerCAmelCase)
        model.to(lowerCAmelCase)
        model.eval()
        __lowercase = model(
            lowerCAmelCase, attention_mask=lowerCAmelCase, start_positions=lowerCAmelCase, end_positions=lowerCAmelCase)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def _A(self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase):
        __lowercase = self.num_labels
        __lowercase = DistilBertForSequenceClassification(lowerCAmelCase)
        model.to(lowerCAmelCase)
        model.eval()
        __lowercase = model(lowerCAmelCase, attention_mask=lowerCAmelCase, labels=lowerCAmelCase)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def _A(self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase):
        __lowercase = self.num_labels
        __lowercase = DistilBertForTokenClassification(config=lowerCAmelCase)
        model.to(lowerCAmelCase)
        model.eval()
        __lowercase = model(lowerCAmelCase, attention_mask=lowerCAmelCase, labels=lowerCAmelCase)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def _A(self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase):
        __lowercase = self.num_choices
        __lowercase = DistilBertForMultipleChoice(config=lowerCAmelCase)
        model.to(lowerCAmelCase)
        model.eval()
        __lowercase = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        __lowercase = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        __lowercase = model(
            lowerCAmelCase,
            attention_mask=lowerCAmelCase,
            labels=lowerCAmelCase,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def _A(self):
        __lowercase = self.prepare_config_and_inputs()
        ((__lowercase), (__lowercase), (__lowercase), (__lowercase), (__lowercase), (__lowercase)) = config_and_inputs
        __lowercase = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict


@require_torch
class A(A_, A_, unittest.TestCase):
    UpperCamelCase_: Any = (
        (
            DistilBertModel,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    UpperCamelCase_: Optional[int] = (
        {
            '''feature-extraction''': DistilBertModel,
            '''fill-mask''': DistilBertForMaskedLM,
            '''question-answering''': DistilBertForQuestionAnswering,
            '''text-classification''': DistilBertForSequenceClassification,
            '''token-classification''': DistilBertForTokenClassification,
            '''zero-shot''': DistilBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    UpperCamelCase_: str = True
    UpperCamelCase_: str = True
    UpperCamelCase_: Union[str, Any] = True
    UpperCamelCase_: Optional[int] = True

    def _A(self):
        __lowercase = DistilBertModelTester(self)
        __lowercase = ConfigTester(self, config_class=lowerCAmelCase, dim=3_7)

    def _A(self):
        self.config_tester.run_common_tests()

    def _A(self):
        __lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*lowerCAmelCase)

    def _A(self):
        __lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*lowerCAmelCase)

    def _A(self):
        __lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*lowerCAmelCase)

    def _A(self):
        __lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*lowerCAmelCase)

    def _A(self):
        __lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*lowerCAmelCase)

    def _A(self):
        __lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*lowerCAmelCase)

    @slow
    def _A(self):
        for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __lowercase = DistilBertModel.from_pretrained(lowerCAmelCase)
            self.assertIsNotNone(lowerCAmelCase)

    @slow
    @require_torch_gpu
    def _A(self):
        __lowercase, __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # BertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == DistilBertForMultipleChoice:
                return
            __lowercase = True
            __lowercase = model_class(config=lowerCAmelCase)
            __lowercase = self._prepare_for_class(lowerCAmelCase, lowerCAmelCase)
            __lowercase = torch.jit.trace(
                lowerCAmelCase, (inputs_dict['input_ids'].to('cpu'), inputs_dict['attention_mask'].to('cpu')))
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(lowerCAmelCase, os.path.join(lowerCAmelCase, 'traced_model.pt'))
                __lowercase = torch.jit.load(os.path.join(lowerCAmelCase, 'traced_model.pt'), map_location=lowerCAmelCase)
                loaded(inputs_dict['input_ids'].to(lowerCAmelCase), inputs_dict['attention_mask'].to(lowerCAmelCase))


@require_torch
class A(unittest.TestCase):
    @slow
    def _A(self):
        __lowercase = DistilBertModel.from_pretrained('distilbert-base-uncased')
        __lowercase = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]])
        __lowercase = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            __lowercase = model(lowerCAmelCase, attention_mask=lowerCAmelCase)[0]
        __lowercase = torch.Size((1, 1_1, 7_6_8))
        self.assertEqual(output.shape, lowerCAmelCase)
        __lowercase = torch.tensor(
            [[[-0.16_39, 0.32_99, 0.16_48], [-0.17_46, 0.32_89, 0.17_10], [-0.18_84, 0.33_57, 0.18_10]]])
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], lowerCAmelCase, atol=1E-4))
```
from collections.abc import Callable

import numpy as np


def euler_modified(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.ndarray:
    """
    Solve an ODE dy/dx = ode_func(x, y) with the modified Euler (Heun) method:
    a forward-Euler predictor followed by a trapezoidal corrector.
    """
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        # Predictor: plain Euler step
        y[k + 1] = y[k] + step_size * ode_func(x, y[k])
        # Corrector: average the slopes at both ends of the interval
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y[k + 1]))
        )
        x += step_size

    return y


if __name__ == "__main__":
    import doctest

    doctest.testmod()
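# A minimal usage sketch appended for illustration (not in the original file):
# solving dy/dx = y with y(0) = 1 over [0, 1]. The exact solution is exp(x),
# so the final value should be close to e for a small step size.
if __name__ == "__main__":
    euler_estimate = euler_modified(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)
    print(f"estimate: {euler_estimate[-1]:.5f}, exact: {np.e:.5f}")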
def bfs(graph, s, t, parent):
    """Breadth-first search: return True if a path from s to t exists in the
    residual graph, recording the path in `parent`."""
    visited = [False] * len(graph)
    queue = []
    queue.append(s)
    visited[s] = True

    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u

    return visited[t]


def ford_fulkerson(graph, source, sink):
    """Compute the maximum flow from source to sink (Edmonds-Karp variant).
    Note: `graph` is modified in place into its residual graph."""
    parent = [-1] * (len(graph))
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum residual capacity along the selected path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]

        max_flow += path_flow

        # Update the residual capacities of the edges and the reverse edges
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]

    return max_flow


graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]

source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
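# A small illustrative check appended to the original script: on a 3-node chain
# with capacities 3 and 2, the bottleneck (and hence the maximum flow) is 2.
# A fresh graph is used because ford_fulkerson mutates its argument.
tiny_graph = [
    [0, 3, 0],
    [0, 0, 2],
    [0, 0, 0],
]
assert ford_fulkerson(tiny_graph, 0, 2) == 2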
from collections.abc import Sequence


def max_subarray_sum(arr: Sequence[float], allow_empty_subarrays: bool = False) -> float:
    """Kadane's algorithm: return the largest sum of any contiguous subarray."""
    if not arr:
        return 0

    max_sum = 0 if allow_empty_subarrays else float("-inf")
    curr_sum = 0.0
    for num in arr:
        # Either extend the running subarray or start a new one at `num`
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)

    return max_sum


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
    print(f"{max_subarray_sum(nums) = }")
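# An extra worked example appended for illustration: with allow_empty_subarrays
# the answer can never be negative, because the empty subarray (sum 0) counts.
if __name__ == "__main__":
    all_negative = [-8, -3, -6]
    print(f"{max_subarray_sum(all_negative) = }")                              # -3
    print(f"{max_subarray_sum(all_negative, allow_empty_subarrays=True) = }")  # 0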
from __future__ import annotations


def kmp(pattern: str, text: str) -> bool:
    """Knuth-Morris-Pratt search: return True if `pattern` occurs in `text`."""
    # 1) Construct the failure array
    failure = get_failure_array(pattern)

    # 2) Step through text searching for pattern
    i, j = 0, 0  # index into text, pattern
    while i < len(text):
        if pattern[j] == text[i]:
            if j == (len(pattern) - 1):
                return True
            j += 1

        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False


def get_failure_array(pattern: str) -> list[int]:
    """Compute the KMP failure function of `pattern`."""
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i)
    return failure


if __name__ == "__main__":
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert kmp(pattern, text1) and not kmp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert kmp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert kmp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert kmp(pattern, text)

    # Test 5)
    pattern = "aabaabaaa"
    assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
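# A short illustration appended to the tests above: the failure array stores,
# for each prefix of the pattern, the length of the longest proper prefix that
# is also a suffix — this is what lets kmp() skip redundant re-comparisons.
if __name__ == "__main__":
    assert get_failure_array("ABABX") == [0, 0, 1, 2, 0]
    assert kmp("needle", "haystack with a needle in it")
    assert not kmp("needle", "haystack without one")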
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/deit-base-distilled-patch16-224": (
        "https://huggingface.co/facebook/deit-base-distilled-patch16-224/resolve/main/config.json"
    ),
    # See all DeiT models at https://huggingface.co/models?filter=deit
}


class DeiTConfig(PretrainedConfig):
    model_type = "deit"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        encoder_stride=16,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride


class DeiTOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
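# A brief usage sketch appended for illustration only (not part of the original
# module): building a smaller-than-default DeiT configuration and inspecting
# the ONNX export metadata defined above. The chosen sizes are arbitrary, and
# because this module uses relative imports it must be run from within the
# package (or with the imports adapted).
if __name__ == "__main__":
    config = DeiTConfig(hidden_size=192, num_hidden_layers=6, num_attention_heads=3)
    print(config.model_type, config.image_size, config.patch_size)  # deit 224 16
    onnx_config = DeiTOnnxConfig(config)
    print(dict(onnx_config.inputs), onnx_config.atol_for_validation)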
from typing import TYPE_CHECKING

from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable


_import_structure = {"configuration_gpt_neox": ["GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXConfig"]}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_gpt_neox_fast"] = ["GPTNeoXTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_gpt_neox"] = [
        "GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoXForCausalLM",
        "GPTNeoXForQuestionAnswering",
        "GPTNeoXForSequenceClassification",
        "GPTNeoXForTokenClassification",
        "GPTNeoXLayer",
        "GPTNeoXModel",
        "GPTNeoXPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_neox import (
            GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTNeoXForCausalLM,
            GPTNeoXForQuestionAnswering,
            GPTNeoXForSequenceClassification,
            GPTNeoXForTokenClassification,
            GPTNeoXLayer,
            GPTNeoXModel,
            GPTNeoXPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import argparse from collections import OrderedDict from pathlib import Path import requests import torch from PIL import Image from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor from transformers.utils import logging logging.set_verbosity_info() lowerCAmelCase = logging.get_logger(__name__) def _lowerCamelCase( lowercase__ ) -> Tuple: '''simple docstring''' __lowercase= OrderedDict() for key, value in state_dict.items(): if key.startswith('module.encoder' ): __lowercase= key.replace('module.encoder' , 'glpn.encoder' ) if key.startswith('module.decoder' ): __lowercase= key.replace('module.decoder' , 'decoder.stages' ) if "patch_embed" in key: # replace for example patch_embed1 by patch_embeddings.0 __lowercase= key[key.find('patch_embed' ) + len('patch_embed' )] __lowercase= key.replace(F'patch_embed{idx}' , F'patch_embeddings.{int(lowercase__ )-1}' ) if "norm" in key: __lowercase= key.replace('norm' , 'layer_norm' ) if "glpn.encoder.layer_norm" in key: # replace for example layer_norm1 by layer_norm.0 __lowercase= key[key.find('glpn.encoder.layer_norm' ) + len('glpn.encoder.layer_norm' )] __lowercase= key.replace(F'layer_norm{idx}' , F'layer_norm.{int(lowercase__ )-1}' ) if "layer_norm1" in key: __lowercase= key.replace('layer_norm1' , 'layer_norm_1' ) if "layer_norm2" in key: __lowercase= key.replace('layer_norm2' , 'layer_norm_2' ) if "block" in key: # replace for example block1 by block.0 __lowercase= key[key.find('block' ) + len('block' )] __lowercase= key.replace(F'block{idx}' , F'block.{int(lowercase__ )-1}' ) if "attn.q" in key: __lowercase= key.replace('attn.q' , 'attention.self.query' ) if "attn.proj" in key: __lowercase= key.replace('attn.proj' , 'attention.output.dense' ) if "attn" in key: __lowercase= key.replace('attn' , 'attention.self' ) if "fc1" in key: __lowercase= key.replace('fc1' , 'dense1' ) if "fc2" in key: __lowercase= key.replace('fc2' , 'dense2' ) if "linear_pred" in key: __lowercase= key.replace('linear_pred' , 'classifier' ) if "linear_fuse" in key: __lowercase= key.replace('linear_fuse.conv' , 'linear_fuse' ) __lowercase= key.replace('linear_fuse.bn' , 'batch_norm' ) if "linear_c" in key: # replace for example linear_c4 by linear_c.3 __lowercase= key[key.find('linear_c' ) + len('linear_c' )] __lowercase= key.replace(F'linear_c{idx}' , F'linear_c.{int(lowercase__ )-1}' ) if "bot_conv" in key: __lowercase= key.replace('bot_conv' , '0.convolution' ) if "skip_conv1" in key: __lowercase= key.replace('skip_conv1' , '1.convolution' ) if "skip_conv2" in key: __lowercase= key.replace('skip_conv2' , '2.convolution' ) if "fusion1" in key: __lowercase= key.replace('fusion1' , '1.fusion' ) if "fusion2" in key: __lowercase= key.replace('fusion2' , '2.fusion' ) if "fusion3" in key: __lowercase= key.replace('fusion3' , '3.fusion' ) if "fusion" in key and "conv" in key: __lowercase= key.replace('conv' , 'convolutional_layer' ) if key.startswith('module.last_layer_depth' ): __lowercase= key.replace('module.last_layer_depth' , 'head.head' ) __lowercase= value return new_state_dict def _lowerCamelCase( lowercase__ , lowercase__ ) -> List[str]: '''simple docstring''' for i in range(config.num_encoder_blocks ): for j in range(config.depths[i] ): # read in weights + bias of keys and values (which is a single matrix in the original implementation) __lowercase= state_dict.pop(F'glpn.encoder.block.{i}.{j}.attention.self.kv.weight' ) __lowercase= state_dict.pop(F'glpn.encoder.block.{i}.{j}.attention.self.kv.bias' ) # next, add keys and values (in that order) 
to the state dict __lowercase= kv_weight[ : config.hidden_sizes[i], : ] __lowercase= kv_bias[: config.hidden_sizes[i]] __lowercase= kv_weight[ config.hidden_sizes[i] :, : ] __lowercase= kv_bias[config.hidden_sizes[i] :] def _lowerCamelCase( ) -> int: '''simple docstring''' __lowercase= 'http://images.cocodataset.org/val2017/000000039769.jpg' __lowercase= Image.open(requests.get(lowercase__ , stream=lowercase__ ).raw ) return image @torch.no_grad() def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__=False , lowercase__=None ) -> Any: '''simple docstring''' __lowercase= GLPNConfig(hidden_sizes=[6_4, 1_2_8, 3_2_0, 5_1_2] , decoder_hidden_size=6_4 , depths=[3, 8, 2_7, 3] ) # load image processor (only resize + rescale) __lowercase= GLPNImageProcessor() # prepare image __lowercase= prepare_img() __lowercase= image_processor(images=lowercase__ , return_tensors='pt' ).pixel_values logger.info('Converting model...' ) # load original state dict __lowercase= torch.load(lowercase__ , map_location=torch.device('cpu' ) ) # rename keys __lowercase= rename_keys(lowercase__ ) # key and value matrices need special treatment read_in_k_v(lowercase__ , lowercase__ ) # create HuggingFace model and load state dict __lowercase= GLPNForDepthEstimation(lowercase__ ) model.load_state_dict(lowercase__ ) model.eval() # forward pass __lowercase= model(lowercase__ ) __lowercase= outputs.predicted_depth # verify output if model_name is not None: if "nyu" in model_name: __lowercase= torch.tensor( [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]] ) elif "kitti" in model_name: __lowercase= torch.tensor( [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]] ) else: raise ValueError(F'Unknown model name: {model_name}' ) __lowercase= torch.Size([1, 4_8_0, 6_4_0] ) assert predicted_depth.shape == expected_shape assert torch.allclose(predicted_depth[0, :3, :3] , lowercase__ , atol=1E-4 ) print('Looks ok!' ) # finally, push to hub if required if push_to_hub: logger.info('Pushing model and image processor to the hub...' ) model.push_to_hub( repo_path_or_name=Path(lowercase__ , lowercase__ ) , organization='nielsr' , commit_message='Add model' , use_temp_dir=lowercase__ , ) image_processor.push_to_hub( repo_path_or_name=Path(lowercase__ , lowercase__ ) , organization='nielsr' , commit_message='Add image processor' , use_temp_dir=lowercase__ , ) if __name__ == "__main__": lowerCAmelCase = argparse.ArgumentParser() parser.add_argument( '''--checkpoint_path''', default=None, type=str, help='''Path to the original PyTorch checkpoint (.pth file).''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether to upload the model to the HuggingFace hub.''' ) parser.add_argument( '''--model_name''', default='''glpn-kitti''', type=str, help='''Name of the model in case you\'re pushing to the hub.''', ) lowerCAmelCase = parser.parse_args() convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
import enum import warnings from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING from ..utils import add_end_docstrings, is_tf_available from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf class A ( enum.Enum ): UpperCamelCase_ : Optional[int] =0 UpperCamelCase_ : Tuple =1 UpperCamelCase_ : Optional[int] =2 @add_end_docstrings(A_ ) class A ( A_ ): UpperCamelCase_ : Union[str, Any] =''' In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision and denounces one of the men as a horse thief. Although his father initially slaps him for making such an accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop, begging for his blessing. <eod> </s> <eos> ''' def __init__(self , *lowerCAmelCase , **lowerCAmelCase ): super().__init__(*lowerCAmelCase , **lowerCAmelCase ) self.check_model_type( TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == 'tf' else MODEL_FOR_CAUSAL_LM_MAPPING ) if "prefix" not in self._preprocess_params: # This is very specific. The logic is quite complex and needs to be done # as a "default". # It also defines both some preprocess_kwargs and generate_kwargs # which is why we cannot put them in their respective methods. __lowercase= None if self.model.config.prefix is not None: __lowercase= self.model.config.prefix if prefix is None and self.model.__class__.__name__ in [ "XLNetLMHeadModel", "TransfoXLLMHeadModel", "TFXLNetLMHeadModel", "TFTransfoXLLMHeadModel", ]: # For XLNet and TransformerXL we add an article to the prompt to give more state to the model. __lowercase= self.XL_PREFIX if prefix is not None: # Recalculate some generate_kwargs linked to prefix. 
__lowercase, __lowercase, __lowercase= self._sanitize_parameters(prefix=lowerCAmelCase , **self._forward_params ) __lowercase= {**self._preprocess_params, **preprocess_params} __lowercase= {**self._forward_params, **forward_params} def _A (self , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , **lowerCAmelCase , ): __lowercase= {} if prefix is not None: __lowercase= prefix if prefix: __lowercase= self.tokenizer( lowerCAmelCase , padding=lowerCAmelCase , add_special_tokens=lowerCAmelCase , return_tensors=self.framework ) __lowercase= prefix_inputs['input_ids'].shape[-1] if handle_long_generation is not None: if handle_long_generation not in {"hole"}: raise ValueError( f'{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected' ' [None, \'hole\']' ) __lowercase= handle_long_generation preprocess_params.update(lowerCAmelCase ) __lowercase= generate_kwargs __lowercase= {} if return_full_text is not None and return_type is None: if return_text is not None: raise ValueError('`return_text` is mutually exclusive with `return_full_text`' ) if return_tensors is not None: raise ValueError('`return_full_text` is mutually exclusive with `return_tensors`' ) __lowercase= ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT if return_tensors is not None and return_type is None: if return_text is not None: raise ValueError('`return_text` is mutually exclusive with `return_tensors`' ) __lowercase= ReturnType.TENSORS if return_type is not None: __lowercase= return_type if clean_up_tokenization_spaces is not None: __lowercase= clean_up_tokenization_spaces if stop_sequence is not None: __lowercase= self.tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase ) if len(lowerCAmelCase ) > 1: warnings.warn( 'Stopping on a multiple token sequence is not yet supported on transformers. The first token of' ' the stop sequence will be used as the stop sequence string in the interim.' 
) __lowercase= stop_sequence_ids[0] return preprocess_params, forward_params, postprocess_params def _A (self , *lowerCAmelCase , **lowerCAmelCase ): # Parse arguments if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]: kwargs.update({'add_space_before_punct_symbol': True} ) return super()._parse_and_tokenize(*lowerCAmelCase , **lowerCAmelCase ) def __call__(self , lowerCAmelCase , **lowerCAmelCase ): return super().__call__(lowerCAmelCase , **lowerCAmelCase ) def _A (self , lowerCAmelCase , lowerCAmelCase="" , lowerCAmelCase=None , **lowerCAmelCase ): __lowercase= self.tokenizer( prefix + prompt_text , padding=lowerCAmelCase , add_special_tokens=lowerCAmelCase , return_tensors=self.framework ) __lowercase= prompt_text if handle_long_generation == "hole": __lowercase= inputs['input_ids'].shape[-1] if "max_new_tokens" in generate_kwargs: __lowercase= generate_kwargs['max_new_tokens'] else: __lowercase= generate_kwargs.get('max_length' , self.model.config.max_length ) - cur_len if new_tokens < 0: raise ValueError('We cannot infer how many new tokens are expected' ) if cur_len + new_tokens > self.tokenizer.model_max_length: __lowercase= self.tokenizer.model_max_length - new_tokens if keep_length <= 0: raise ValueError( 'We cannot use `hole` to handle this generation the number of desired tokens exceeds the' ' models max length' ) __lowercase= inputs['input_ids'][:, -keep_length:] if "attention_mask" in inputs: __lowercase= inputs['attention_mask'][:, -keep_length:] return inputs def _A (self , lowerCAmelCase , **lowerCAmelCase ): __lowercase= model_inputs['input_ids'] __lowercase= model_inputs.get('attention_mask' , lowerCAmelCase ) # Allow empty prompts if input_ids.shape[1] == 0: __lowercase= None __lowercase= None __lowercase= 1 else: __lowercase= input_ids.shape[0] __lowercase= model_inputs.pop('prompt_text' ) # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline. 
__lowercase= generate_kwargs.pop('prefix_length' , 0 ) if prefix_length > 0: __lowercase= 'max_new_tokens' in generate_kwargs or ( 'generation_config' in generate_kwargs and generate_kwargs['generation_config'].max_new_tokens is not None ) if not has_max_new_tokens: __lowercase= generate_kwargs.get('max_length' ) or self.model.config.max_length generate_kwargs["max_length"] += prefix_length __lowercase= 'min_new_tokens' in generate_kwargs or ( 'generation_config' in generate_kwargs and generate_kwargs['generation_config'].min_new_tokens is not None ) if not has_min_new_tokens and "min_length" in generate_kwargs: generate_kwargs["min_length"] += prefix_length # BS x SL __lowercase= self.model.generate(input_ids=lowerCAmelCase , attention_mask=lowerCAmelCase , **lowerCAmelCase ) __lowercase= generated_sequence.shape[0] if self.framework == "pt": __lowercase= generated_sequence.reshape(lowerCAmelCase , out_b // in_b , *generated_sequence.shape[1:] ) elif self.framework == "tf": __lowercase= tf.reshape(lowerCAmelCase , (in_b, out_b // in_b, *generated_sequence.shape[1:]) ) return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text} def _A (self , lowerCAmelCase , lowerCAmelCase=ReturnType.FULL_TEXT , lowerCAmelCase=True ): __lowercase= model_outputs['generated_sequence'][0] __lowercase= model_outputs['input_ids'] __lowercase= model_outputs['prompt_text'] __lowercase= generated_sequence.numpy().tolist() __lowercase= [] for sequence in generated_sequence: if return_type == ReturnType.TENSORS: __lowercase= {'generated_token_ids': sequence} elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}: # Decode text __lowercase= self.tokenizer.decode( lowerCAmelCase , skip_special_tokens=lowerCAmelCase , clean_up_tokenization_spaces=lowerCAmelCase , ) # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used if input_ids is None: __lowercase= 0 else: __lowercase= len( self.tokenizer.decode( input_ids[0] , skip_special_tokens=lowerCAmelCase , clean_up_tokenization_spaces=lowerCAmelCase , ) ) if return_type == ReturnType.FULL_TEXT: __lowercase= prompt_text + text[prompt_length:] else: __lowercase= text[prompt_length:] __lowercase= {'generated_text': all_text} records.append(lowerCAmelCase ) return records
import json import os import unittest from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, require_torch from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class A ( A_ , unittest.TestCase ): UpperCamelCase_ : List[Any] =LEDTokenizer UpperCamelCase_ : List[Any] =LEDTokenizerFast UpperCamelCase_ : Tuple =True def _A (self ): super().setUp() __lowercase= [ 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', '\u0120', '\u0120l', '\u0120n', '\u0120lo', '\u0120low', 'er', '\u0120lowest', '\u0120newer', '\u0120wider', '<unk>', ] __lowercase= dict(zip(lowerCAmelCase , range(len(lowerCAmelCase ) ) ) ) __lowercase= ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', ''] __lowercase= {'unk_token': '<unk>'} __lowercase= os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) __lowercase= os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp: fp.write(json.dumps(lowerCAmelCase ) + '\n' ) with open(self.merges_file , 'w' , encoding='utf-8' ) as fp: fp.write('\n'.join(lowerCAmelCase ) ) def _A (self , **lowerCAmelCase ): kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase ) def _A (self , **lowerCAmelCase ): kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase ) def _A (self , lowerCAmelCase ): return "lower newer", "lower newer" @cached_property def _A (self ): return LEDTokenizer.from_pretrained('allenai/led-base-16384' ) @cached_property def _A (self ): return LEDTokenizerFast.from_pretrained('allenai/led-base-16384' ) @require_torch def _A (self ): __lowercase= ['A long paragraph for summarization.', 'Another paragraph for summarization.'] __lowercase= [0, 2_5_0, 2_5_1, 1_7_8_1_8, 1_3, 3_9_1_8_6, 1_9_3_8, 4, 2] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __lowercase= tokenizer(lowerCAmelCase , max_length=len(lowerCAmelCase ) , padding=lowerCAmelCase , return_tensors='pt' ) self.assertIsInstance(lowerCAmelCase , lowerCAmelCase ) self.assertEqual((2, 9) , batch.input_ids.shape ) self.assertEqual((2, 9) , batch.attention_mask.shape ) __lowercase= batch.input_ids.tolist()[0] self.assertListEqual(lowerCAmelCase , lowerCAmelCase ) @require_torch def _A (self ): __lowercase= ['A long paragraph for summarization.', 'Another paragraph for summarization.'] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __lowercase= tokenizer(lowerCAmelCase , padding=lowerCAmelCase , return_tensors='pt' ) self.assertIn('input_ids' , lowerCAmelCase ) self.assertIn('attention_mask' , lowerCAmelCase ) self.assertNotIn('labels' , lowerCAmelCase ) self.assertNotIn('decoder_attention_mask' , lowerCAmelCase ) @require_torch def _A (self ): __lowercase= [ 'Summary of the text.', 'Another summary.', ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __lowercase= tokenizer(text_target=lowerCAmelCase , max_length=3_2 , padding='max_length' , return_tensors='pt' ) self.assertEqual(3_2 , targets['input_ids'].shape[1] ) @require_torch def _A (self ): for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __lowercase= tokenizer( ['I am a small frog' * 1_0_2_4, 'I am a small frog'] , padding=lowerCAmelCase 
, truncation=lowerCAmelCase , return_tensors='pt' ) self.assertIsInstance(lowerCAmelCase , lowerCAmelCase ) self.assertEqual(batch.input_ids.shape , (2, 5_1_2_2) ) @require_torch def _A (self ): __lowercase= ['A long paragraph for summarization.'] __lowercase= [ 'Summary of the text.', ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __lowercase= tokenizer(lowerCAmelCase , return_tensors='pt' ) __lowercase= tokenizer(text_target=lowerCAmelCase , return_tensors='pt' ) __lowercase= inputs['input_ids'] __lowercase= targets['input_ids'] self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() ) self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() ) @require_torch def _A (self ): for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __lowercase= ['Summary of the text.', 'Another summary.'] __lowercase= [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]] __lowercase= tokenizer(lowerCAmelCase , padding=lowerCAmelCase ) __lowercase= [[0] * len(lowerCAmelCase ) for x in encoded_output['input_ids']] __lowercase= tokenizer.pad(lowerCAmelCase ) self.assertSequenceEqual(outputs['global_attention_mask'] , lowerCAmelCase ) def _A (self ): pass def _A (self ): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ): __lowercase= self.rust_tokenizer_class.from_pretrained(lowerCAmelCase , **lowerCAmelCase ) __lowercase= self.tokenizer_class.from_pretrained(lowerCAmelCase , **lowerCAmelCase ) __lowercase= 'A, <mask> AllenNLP sentence.' __lowercase= tokenizer_r.encode_plus(lowerCAmelCase , add_special_tokens=lowerCAmelCase , return_token_type_ids=lowerCAmelCase ) __lowercase= tokenizer_p.encode_plus(lowerCAmelCase , add_special_tokens=lowerCAmelCase , return_token_type_ids=lowerCAmelCase ) self.assertEqual(sum(tokens_r['token_type_ids'] ) , sum(tokens_p['token_type_ids'] ) ) self.assertEqual( sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) , sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) , ) __lowercase= tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] ) __lowercase= tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] ) self.assertSequenceEqual(tokens_p['input_ids'] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] ) self.assertSequenceEqual(tokens_r['input_ids'] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] ) self.assertSequenceEqual( lowerCAmelCase , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] ) self.assertSequenceEqual( lowerCAmelCase , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch
import torch.nn as nn

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer


@dataclass
class VQEncoderOutput(BaseOutput):
    latents: torch.FloatTensor


class VQModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 3,
        sample_size: int = 32,
        num_vq_embeddings: int = 256,
        norm_num_groups: int = 32,
        vq_embed_dim: Optional[int] = None,
        scaling_factor: float = 0.18215,
        norm_type: str = "group",
    ):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=False,
        )

        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels

        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            norm_type=norm_type,
        )

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> VQEncoderOutput:
        h = self.encoder(x)
        h = self.quant_conv(h)

        if not return_dict:
            return (h,)

        return VQEncoderOutput(latents=h)

    @apply_forward_hook
    def decode(
        self, h: torch.FloatTensor, force_not_quantize: bool = False, return_dict: bool = True
    ) -> Union[DecoderOutput, torch.FloatTensor]:
        # also go through quantization layer
        if not force_not_quantize:
            quant, emb_loss, info = self.quantize(h)
        else:
            quant = h
        quant2 = self.post_quant_conv(quant)
        dec = self.decoder(quant2, quant if self.config.norm_type == "spatial" else None)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    def forward(self, sample: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        x = sample
        h = self.encode(x).latents
        dec = self.decode(h).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
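# A minimal round-trip sketch appended for illustration only (not part of the
# original module): encode a random image batch, then decode it back. With the
# default config the single down/up block preserves spatial size, so both
# tensors should come back as (1, 3, 32, 32); exact shapes depend on the block
# types chosen. Like the module above, this relies on relative imports and so
# must be run from within the package.
if __name__ == "__main__":
    model = VQModel()
    images = torch.randn(1, 3, 32, 32)
    latents = model.encode(images).latents
    reconstruction = model(images).sample
    print(latents.shape, reconstruction.shape)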
def permute(nums: list[int]) -> list[list[int]]:
    """Return all permutations of `nums` by rotating the list recursively."""
    result = []
    if len(nums) == 1:
        return [nums.copy()]
    for _ in range(len(nums)):
        n = nums.pop(0)
        permutations = permute(nums)
        for perm in permutations:
            perm.append(n)
        result.extend(permutations)
        nums.append(n)
    return result


def permute2(nums: list[int]) -> list[list[int]]:
    """Return all permutations of `nums` using in-place backtracking."""

    def backtrack(start: int) -> None:
        if start == len(nums) - 1:
            output.append(nums[:])
        else:
            for i in range(start, len(nums)):
                nums[i], nums[start] = nums[start], nums[i]
                backtrack(start + 1)
                nums[i], nums[start] = nums[start], nums[i]  # backtrack

    output: list[list[int]] = []
    backtrack(0)
    return output


if __name__ == "__main__":
    import doctest

    # use res to print the data in permute2 function
    res = permute2([1, 2, 3])
    print(res)
    doctest.testmod()
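# An extra check appended for illustration: both implementations generate the
# same 3! = 6 permutations, merely in a different order.
if __name__ == "__main__":
    assert sorted(permute([1, 2, 3])) == sorted(permute2([1, 2, 3]))
    assert len(permute2([1, 2, 3, 4])) == 24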
import os import numpy import onnx def _lowerCamelCase( lowercase__ , lowercase__ ) -> Union[str, Any]: '''simple docstring''' __lowercase= a.name __lowercase= b.name __lowercase= '' __lowercase= '' __lowercase= a == b __lowercase= name_a __lowercase= name_b return res def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ ) -> Union[str, Any]: '''simple docstring''' for i, input_name in enumerate(node_proto.input ): if input_name == name: node_proto.input.insert(lowercase__ , lowercase__ ) node_proto.input.pop(i + 1 ) if node_proto.op_type == "If": _graph_replace_input_with(node_proto.attribute[0].g , lowercase__ , lowercase__ ) _graph_replace_input_with(node_proto.attribute[1].g , lowercase__ , lowercase__ ) if node_proto.op_type == "Loop": _graph_replace_input_with(node_proto.attribute[0].g , lowercase__ , lowercase__ ) def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ ) -> str: '''simple docstring''' for n in graph_proto.node: _node_replace_input_with(lowercase__ , lowercase__ , lowercase__ ) def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ ) -> Any: '''simple docstring''' __lowercase= list(model.graph.initializer ) __lowercase= list(model_without_ext.graph.initializer ) for i, ref_i in ind_to_replace: assert inits_with_data[i].name == inits[i].name assert inits_with_data[ref_i].name == inits[ref_i].name assert i > ref_i __lowercase= inits[i].name __lowercase= inits[ref_i].name model_without_ext.graph.initializer.remove(inits[i] ) # for n in model.graph.node: _graph_replace_input_with(model_without_ext.graph , lowercase__ , lowercase__ ) def _lowerCamelCase( lowercase__ ) -> Dict: '''simple docstring''' __lowercase= os.path.dirname(lowercase__ ) __lowercase= os.path.basename(lowercase__ ) __lowercase= onnx.load(os.path.join(lowercase__ , lowercase__ ) ) __lowercase= list(model.graph.initializer ) __lowercase= set() __lowercase= {} __lowercase= [] __lowercase= 0 for i in range(len(lowercase__ ) ): if i in dup_set: continue for j in range(i + 1 , len(lowercase__ ) ): if j in dup_set: continue if _is_equal_tensor_proto(inits[i] , inits[j] ): dup_set.add(lowercase__ ) dup_set.add(lowercase__ ) __lowercase= inits[j].data_type __lowercase= numpy.prod(inits[j].dims ) if dtype == 1: mem_size *= 4 elif dtype == 6: mem_size *= 4 elif dtype == 7 or dtype == 1_1: mem_size *= 8 else: print('unexpected data type: ' , lowercase__ ) total_reduced_size += mem_size __lowercase= inits[i].name __lowercase= inits[j].name if name_i in dup_map: dup_map[name_i].append(lowercase__ ) else: __lowercase= [name_j] ind_to_replace.append((j, i) ) print('total reduced size: ' , total_reduced_size / 1_0_2_4 / 1_0_2_4 / 1_0_2_4 , 'GB' ) __lowercase= sorted(lowercase__ ) _remove_dup_initializers_from_model(lowercase__ , lowercase__ , lowercase__ ) __lowercase= 'optimized_' + model_file_name __lowercase= os.path.join(lowercase__ , lowercase__ ) onnx.save(lowercase__ , lowercase__ ) return new_model
from __future__ import annotations

from collections.abc import Iterable, Iterator
from dataclasses import dataclass

test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)


@dataclass
class Node:
    data: int
    next_node: Node | None


class SortedLinkedList:
    def __init__(self, ints: Iterable[int]) -> None:
        # Insert in descending order at the head, which leaves the
        # resulting linked list sorted in ascending order.
        self.head: Node | None = None
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])


def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    return SortedLinkedList(list(sll_one) + list(sll_two))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
def solution(n: int = 4000000) -> int:
    """Return the sum of all even Fibonacci numbers that do not exceed n."""
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)


if __name__ == "__main__":
    print(f"{solution() = }")
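# A couple of spot checks appended for illustration. The even Fibonacci
# numbers up to 100 are 2, 8 and 34, so their sum is 44.
if __name__ == "__main__":
    assert solution(10) == 10   # 2 + 8
    assert solution(100) == 44  # 2 + 8 + 34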
from __future__ import annotations


def rec_insertion_sort(collection: list, n: int) -> None:
    """Sort `collection[:n]` in place with recursive insertion sort."""
    if len(collection) <= 1 or n <= 1:
        return

    insert_next(collection, n - 1)
    rec_insertion_sort(collection, n - 1)


def insert_next(collection: list, index: int) -> None:
    """Bubble the element at `index - 1` forward until it is in order."""
    if index >= len(collection) or collection[index - 1] <= collection[index]:
        return

    # Swaps adjacent elements since they are not in ascending order
    collection[index - 1], collection[index] = (
        collection[index],
        collection[index - 1],
    )

    insert_next(collection, index + 1)


if __name__ == "__main__":
    numbers = input("Enter integers separated by spaces: ")
    number_list = [int(num) for num in numbers.split()]
    rec_insertion_sort(number_list, len(number_list))
    print(number_list)
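# A non-interactive spot check appended for illustration; since the original
# entry point reads from standard input, this runs after it when the file is
# executed directly.
if __name__ == "__main__":
    sample = [5, 3, 1, 4, 2]
    rec_insertion_sort(sample, len(sample))
    assert sample == [1, 2, 3, 4, 5]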
import warnings from transformers import AutoTokenizer from transformers.utils import is_torch_available from transformers.utils.generic import ExplicitEnum from ...processing_utils import ProcessorMixin if is_torch_available(): import torch class A ( A_ ): UpperCamelCase_ : List[Any] ='''char''' UpperCamelCase_ : Union[str, Any] ='''bpe''' UpperCamelCase_ : Optional[Any] ='''wp''' lowerCAmelCase = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE) class A ( A_ ): UpperCamelCase_ : Optional[int] =['''image_processor''', '''char_tokenizer'''] UpperCamelCase_ : List[Any] ='''ViTImageProcessor''' UpperCamelCase_ : Dict ='''MgpstrTokenizer''' def __init__(self , lowerCAmelCase=None , lowerCAmelCase=None , **lowerCAmelCase ): __lowercase= None if "feature_extractor" in kwargs: warnings.warn( 'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`' ' instead.' , lowerCAmelCase , ) __lowercase= kwargs.pop('feature_extractor' ) __lowercase= image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('You need to specify an `image_processor`.' ) if tokenizer is None: raise ValueError('You need to specify a `tokenizer`.' ) __lowercase= tokenizer __lowercase= AutoTokenizer.from_pretrained('gpt2' ) __lowercase= AutoTokenizer.from_pretrained('bert-base-uncased' ) super().__init__(lowerCAmelCase , lowerCAmelCase ) def __call__(self , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , **lowerCAmelCase ): if images is None and text is None: raise ValueError('You need to specify either an `images` or `text` input to process.' ) if images is not None: __lowercase= self.image_processor(lowerCAmelCase , return_tensors=lowerCAmelCase , **lowerCAmelCase ) if text is not None: __lowercase= self.char_tokenizer(lowerCAmelCase , return_tensors=lowerCAmelCase , **lowerCAmelCase ) if text is None: return inputs elif images is None: return encodings else: __lowercase= encodings['input_ids'] return inputs def _A (self , lowerCAmelCase ): __lowercase, __lowercase, __lowercase= sequences __lowercase= char_preds.size(0 ) __lowercase, __lowercase= self._decode_helper(lowerCAmelCase , 'char' ) __lowercase, __lowercase= self._decode_helper(lowerCAmelCase , 'bpe' ) __lowercase, __lowercase= self._decode_helper(lowerCAmelCase , 'wp' ) __lowercase= [] __lowercase= [] for i in range(lowerCAmelCase ): __lowercase= [char_scores[i], bpe_scores[i], wp_scores[i]] __lowercase= [char_strs[i], bpe_strs[i], wp_strs[i]] __lowercase= scores.index(max(lowerCAmelCase ) ) final_strs.append(strs[max_score_index] ) final_scores.append(scores[max_score_index] ) __lowercase= {} __lowercase= final_strs __lowercase= final_scores __lowercase= char_strs __lowercase= bpe_strs __lowercase= wp_strs return out def _A (self , lowerCAmelCase , lowerCAmelCase ): if format == DecodeType.CHARACTER: __lowercase= self.char_decode __lowercase= 1 __lowercase= '[s]' elif format == DecodeType.BPE: __lowercase= self.bpe_decode __lowercase= 2 __lowercase= '#' elif format == DecodeType.WORDPIECE: __lowercase= self.wp_decode __lowercase= 1_0_2 __lowercase= '[SEP]' else: raise ValueError(f'Format {format} is not supported.' 
) __lowercase, __lowercase= [], [] __lowercase= pred_logits.size(0 ) __lowercase= pred_logits.size(1 ) __lowercase, __lowercase= pred_logits.topk(1 , dim=-1 , largest=lowerCAmelCase , sorted=lowerCAmelCase ) __lowercase= preds_index.view(-1 , lowerCAmelCase )[:, 1:] __lowercase= decoder(lowerCAmelCase ) __lowercase, __lowercase= torch.nn.functional.softmax(lowerCAmelCase , dim=2 ).max(dim=2 ) __lowercase= preds_max_prob[:, 1:] for index in range(lowerCAmelCase ): __lowercase= preds_str[index].find(lowerCAmelCase ) __lowercase= preds_str[index][:pred_eos] __lowercase= preds_index[index].cpu().tolist() __lowercase= pred_index.index(lowerCAmelCase ) if eos_token in pred_index else -1 __lowercase= preds_max_prob[index][: pred_eos_index + 1] __lowercase= pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0 dec_strs.append(lowerCAmelCase ) conf_scores.append(lowerCAmelCase ) return dec_strs, conf_scores def _A (self , lowerCAmelCase ): __lowercase= [seq.replace(' ' , '' ) for seq in self.char_tokenizer.batch_decode(lowerCAmelCase )] return decode_strs def _A (self , lowerCAmelCase ): return self.bpe_tokenizer.batch_decode(lowerCAmelCase ) def _A (self , lowerCAmelCase ): __lowercase= [seq.replace(' ' , '' ) for seq in self.wp_tokenizer.batch_decode(lowerCAmelCase )] return decode_strs
def split(string: str, separator: str = " ") -> list:
    """Split `string` on `separator` without using str.split."""
    split_words = []
    last_index = 0
    for index, char in enumerate(string):
        if char == separator:
            split_words.append(string[last_index:index])
            last_index = index + 1
        elif index + 1 == len(string):
            split_words.append(string[last_index : index + 1])
    return split_words


if __name__ == "__main__":
    from doctest import testmod

    testmod()
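# Extra examples appended for illustration. One quirk of this implementation:
# a trailing separator does not produce a trailing empty string, unlike
# str.split with an explicit separator.
if __name__ == "__main__":
    assert split("apple#banana#cherry", separator="#") == ["apple", "banana", "cherry"]
    assert split("hello world") == ["hello", "world"]
    print(split("ab,", separator=","))  # ['ab'] — whereas "ab,".split(",") gives ['ab', '']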
import os import jsonlines import numpy as np from tqdm import tqdm lowerCAmelCase = 2_0_4_8 lowerCAmelCase = 4_0_9_6 lowerCAmelCase = 4_2 lowerCAmelCase = os.environ.pop('''PROCESS_TRAIN''', '''false''') lowerCAmelCase = {'''null''': 0, '''short''': 1, '''long''': 2, '''yes''': 3, '''no''': 4} def _lowerCamelCase( lowercase__ ) -> Any: '''simple docstring''' def choose_first(lowercase__ , lowercase__=False ): assert isinstance(lowercase__ , lowercase__ ) if len(lowercase__ ) == 1: __lowercase= answer[0] return {k: [answer[k]] for k in answer} if is_long_answer else answer for a in answer: if is_long_answer: __lowercase= {k: [a[k]] for k in a} if len(a['start_token'] ) > 0: break return a __lowercase= {'id': example['id']} __lowercase= example['annotations'] __lowercase= annotation['yes_no_answer'] if 0 in yes_no_answer or 1 in yes_no_answer: __lowercase= ['yes'] if 1 in yes_no_answer else ['no'] __lowercase= __lowercase= [] __lowercase= __lowercase= [] __lowercase= ['<cls>'] else: __lowercase= ['short'] __lowercase= choose_first(annotation['short_answers'] ) if len(out['start_token'] ) == 0: # answer will be long if short is not available __lowercase= ['long'] __lowercase= choose_first(annotation['long_answer'] , is_long_answer=lowercase__ ) __lowercase= [] answer.update(lowercase__ ) # disregard some samples if len(answer['start_token'] ) > 1 or answer["start_token"] == answer["end_token"]: __lowercase= True else: __lowercase= False __lowercase= ['start_token', 'end_token', 'start_byte', 'end_byte', 'text'] if not all(isinstance(answer[k] , lowercase__ ) for k in cols ): raise ValueError('Issue in ID' , example['id'] ) return answer def _lowerCamelCase( lowercase__ , lowercase__=False ) -> str: '''simple docstring''' __lowercase= _get_single_answer(lowercase__ ) # bytes are of no use del answer["start_byte"] del answer["end_byte"] # handle yes_no answers explicitly if answer["category"][0] in ["yes", "no"]: # category is list with one element __lowercase= example['document']['tokens'] __lowercase= [] for i in range(len(doc['token'] ) ): if not doc["is_html"][i]: context.append(doc['token'][i] ) return { "context": " ".join(lowercase__ ), "answer": { "start_token": -1_0_0, # ignore index in cross-entropy "end_token": -1_0_0, # ignore index in cross-entropy "category": answer["category"], "span": answer["category"], # extra }, } # later, help in removing all no answers if answer["start_token"] == [-1]: return { "context": "None", "answer": { "start_token": -1, "end_token": -1, "category": "null", "span": "None", # extra }, } # handling normal samples __lowercase= ['start_token', 'end_token'] answer.update({k: answer[k][0] if len(answer[k] ) > 0 else answer[k] for k in cols} ) # e.g. 
[10] == 10 __lowercase= example['document']['tokens'] __lowercase= answer['start_token'] __lowercase= answer['end_token'] __lowercase= [] for i in range(len(doc['token'] ) ): if not doc["is_html"][i]: context.append(doc['token'][i] ) else: if answer["start_token"] > i: start_token -= 1 if answer["end_token"] > i: end_token -= 1 __lowercase= ' '.join(context[start_token:end_token] ) # checking above code if assertion: __lowercase= doc['is_html'][answer['start_token'] : answer['end_token']] __lowercase= doc['token'][answer['start_token'] : answer['end_token']] __lowercase= ' '.join([old[i] for i in range(len(lowercase__ ) ) if not is_html[i]] ) if new != old: print('ID:' , example['id'] ) print('New:' , lowercase__ , end='\n' ) print('Old:' , lowercase__ , end='\n\n' ) return { "context": " ".join(lowercase__ ), "answer": { "start_token": start_token, "end_token": end_token - 1, # this makes it inclusive "category": answer["category"], # either long or short "span": new, # extra }, } def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__=2_0_4_8 , lowercase__=4_0_9_6 , lowercase__=True ) -> Optional[Any]: '''simple docstring''' __lowercase= get_context_and_ans(lowercase__ , assertion=lowercase__ ) __lowercase= out['answer'] # later, removing these samples if answer["start_token"] == -1: return { "example_id": example["id"], "input_ids": [[-1]], "labels": { "start_token": [-1], "end_token": [-1], "category": ["null"], }, } __lowercase= tokenizer(example['question']['text'] , out['context'] ).input_ids __lowercase= input_ids.index(tokenizer.sep_token_id ) + 1 # return yes/no if answer["category"][0] in ["yes", "no"]: # category is list with one element __lowercase= [] __lowercase= [] __lowercase= input_ids[:q_len] __lowercase= range(lowercase__ , len(lowercase__ ) , max_length - doc_stride ) for i in doc_start_indices: __lowercase= i + max_length - q_len __lowercase= input_ids[i:end_index] inputs.append(q_indices + slice ) category.append(answer['category'][0] ) if slice[-1] == tokenizer.sep_token_id: break return { "example_id": example["id"], "input_ids": inputs, "labels": { "start_token": [-1_0_0] * len(lowercase__ ), "end_token": [-1_0_0] * len(lowercase__ ), "category": category, }, } __lowercase= out['context'].split() __lowercase= splitted_context[answer['end_token']] __lowercase= len( tokenizer( ' '.join(splitted_context[: answer['start_token']] ) , add_special_tokens=lowercase__ , ).input_ids ) __lowercase= len( tokenizer(' '.join(splitted_context[: answer['end_token']] ) , add_special_tokens=lowercase__ ).input_ids ) answer["start_token"] += q_len answer["end_token"] += q_len # fixing end token __lowercase= len(tokenizer(lowercase__ , add_special_tokens=lowercase__ ).input_ids ) if num_sub_tokens > 1: answer["end_token"] += num_sub_tokens - 1 __lowercase= input_ids[answer['start_token'] : answer['end_token'] + 1] # right & left are inclusive __lowercase= answer['start_token'] __lowercase= answer['end_token'] if assertion: __lowercase= tokenizer.decode(lowercase__ ) if answer["span"] != new: print('ISSUE IN TOKENIZATION' ) print('OLD:' , answer['span'] ) print('NEW:' , lowercase__ , end='\n\n' ) if len(lowercase__ ) <= max_length: return { "example_id": example["id"], "input_ids": [input_ids], "labels": { "start_token": [answer["start_token"]], "end_token": [answer["end_token"]], "category": answer["category"], }, } __lowercase= input_ids[:q_len] __lowercase= range(lowercase__ , len(lowercase__ ) , max_length - doc_stride ) __lowercase= [] __lowercase= [] __lowercase= [] 
__lowercase= [] # null, yes, no, long, short for i in doc_start_indices: __lowercase= i + max_length - q_len __lowercase= input_ids[i:end_index] inputs.append(q_indices + slice ) assert len(inputs[-1] ) <= max_length, "Issue in truncating length" if start_token >= i and end_token <= end_index - 1: __lowercase= start_token - i + q_len __lowercase= end_token - i + q_len answers_category.append(answer['category'][0] ) # ["short"] -> "short" else: __lowercase= -1_0_0 __lowercase= -1_0_0 answers_category.append('null' ) __lowercase= inputs[-1][start_token : end_token + 1] answers_start_token.append(lowercase__ ) answers_end_token.append(lowercase__ ) if assertion: if new != old and new != [tokenizer.cls_token_id]: print('ISSUE in strided for ID:' , example['id'] ) print('New:' , tokenizer.decode(lowercase__ ) ) print('Old:' , tokenizer.decode(lowercase__ ) , end='\n\n' ) if slice[-1] == tokenizer.sep_token_id: break return { "example_id": example["id"], "input_ids": inputs, "labels": { "start_token": answers_start_token, "end_token": answers_end_token, "category": answers_category, }, } def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__=2_0_4_8 , lowercase__=4_0_9_6 , lowercase__=False ) -> Tuple: '''simple docstring''' __lowercase= get_strided_contexts_and_ans( lowercase__ , lowercase__ , doc_stride=lowercase__ , max_length=lowercase__ , assertion=lowercase__ , ) return example def _lowerCamelCase( lowercase__ , lowercase__ ) -> str: '''simple docstring''' with jsonlines.open(lowercase__ , 'a' ) as writer: for example in tqdm(lowercase__ , total=len(lowercase__ ) , desc='Saving samples ... ' ): __lowercase= example['labels'] for ids, start, end, cat in zip( example['input_ids'] , labels['start_token'] , labels['end_token'] , labels['category'] , ): if start == -1 and end == -1: continue # leave waste samples with no answer if cat == "null" and np.random.rand() < 0.6: continue # removing 50 % samples writer.write( { 'input_ids': ids, 'start_token': start, 'end_token': end, 'category': CATEGORY_MAPPING[cat], } ) if __name__ == "__main__": from datasets import load_dataset from transformers import BigBirdTokenizer lowerCAmelCase = load_dataset('''natural_questions''') lowerCAmelCase = BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''') lowerCAmelCase = data['''train''' if PROCESS_TRAIN == '''true''' else '''validation'''] lowerCAmelCase = { '''tokenizer''': tokenizer, '''doc_stride''': DOC_STRIDE, '''max_length''': MAX_LENGTH, '''assertion''': False, } lowerCAmelCase = data.map(prepare_inputs, fn_kwargs=fn_kwargs) lowerCAmelCase = data.remove_columns(['''annotations''', '''document''', '''id''', '''question''']) print(data) np.random.seed(SEED) lowerCAmelCase = '''nq-training.jsonl''' if PROCESS_TRAIN == '''true''' else '''nq-validation.jsonl''' save_to_disk(data, file_name=cache_file_name)
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional

import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter

from transformers import HfArgumentParser


def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)


@dataclass
class PlotArguments:
    csv_file: str = field(
        metadata={"help": "The csv file to plot."},
    )
    plot_along_batch: bool = field(
        default=False,
        metadata={"help": "Whether to plot along batch size or sequence length. Defaults to sequence length."},
    )
    is_time: bool = field(
        default=False,
        metadata={"help": "Whether the csv file has time results or memory results. Defaults to memory results."},
    )
    no_log_scale: bool = field(
        default=False,
        metadata={"help": "Disable logarithmic scale when plotting"},
    )
    is_train: bool = field(
        default=False,
        metadata={
            "help": "Whether the csv file has training results or inference results. Defaults to inference results."
        },
    )
    figure_png_file: Optional[str] = field(
        default=None,
        metadata={"help": "Filename under which the plot will be saved. If unused no plot is saved."},
    )
    short_model_names: Optional[List[str]] = list_field(
        default=None, metadata={"help": "List of model names that are used instead of the ones in the csv file."}
    )


def can_convert_to_int(string):
    try:
        int(string)
        return True
    except ValueError:
        return False


def can_convert_to_float(string):
    try:
        float(string)
        return True
    except ValueError:
        return False


class Plot:
    def __init__(self, args):
        self.args = args
        self.result_dict = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}})

        with open(self.args.csv_file, newline="") as csv_file:
            reader = csv.DictReader(csv_file)
            for row in reader:
                model_name = row["model"]
                self.result_dict[model_name]["bsz"].append(int(row["batch_size"]))
                self.result_dict[model_name]["seq_len"].append(int(row["sequence_length"]))
                if can_convert_to_int(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"]), int(row["sequence_length"]))
                    ] = int(row["result"])
                elif can_convert_to_float(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"]), int(row["sequence_length"]))
                    ] = float(row["result"])

    def plot(self):
        fig, ax = plt.subplots()
        title_str = "Time usage" if self.args.is_time else "Memory usage"
        title_str = title_str + " for training" if self.args.is_train else title_str + " for inference"

        if not self.args.no_log_scale:
            # set logarithm scales
            ax.set_xscale("log")
            ax.set_yscale("log")

        for axis in [ax.xaxis, ax.yaxis]:
            axis.set_major_formatter(ScalarFormatter())

        for model_name_idx, model_name in enumerate(self.result_dict.keys()):
            batch_sizes = sorted(set(self.result_dict[model_name]["bsz"]))
            sequence_lengths = sorted(set(self.result_dict[model_name]["seq_len"]))
            results = self.result_dict[model_name]["result"]

            (x_axis_array, inner_loop_array) = (
                (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
            )

            label_model_name = (
                model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
            )

            for inner_loop_value in inner_loop_array:
                if self.args.plot_along_batch:
                    y_axis_array = np.asarray(
                        [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results],
                        dtype=int,
                    )
                else:
                    y_axis_array = np.asarray(
                        [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results],
                        dtype=np.float32,
                    )

                (x_axis_label, inner_loop_label) = (
                    ("batch_size", "len") if self.args.plot_along_batch else ("in #tokens", "bsz")
                )

                x_axis_array = np.asarray(x_axis_array, int)[: len(y_axis_array)]
                plt.scatter(
                    x_axis_array, y_axis_array, label=f"{label_model_name} - {inner_loop_label}: {inner_loop_value}"
                )
                plt.plot(x_axis_array, y_axis_array, "--")

            title_str += f" {label_model_name} vs."

        title_str = title_str[:-4]
        y_axis_label = "Time in s" if self.args.is_time else "Memory in MB"

        # plot
        plt.title(title_str)
        plt.xlabel(x_axis_label)
        plt.ylabel(y_axis_label)
        plt.legend()

        if self.args.figure_png_file is not None:
            plt.savefig(self.args.figure_png_file)
        else:
            plt.show()


def main():
    parser = HfArgumentParser(PlotArguments)
    plot_args = parser.parse_args_into_dataclasses()[0]
    plot = Plot(args=plot_args)
    plot.plot()


if __name__ == "__main__":
    main()
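# Usage sketch (hypothetical): build the dataclass directly instead of going through
# HfArgumentParser. "results.csv" is an assumed file with the columns read above
# (model, batch_size, sequence_length, result), e.g. as written by the transformers
# benchmark utilities.
def _plot_usage_sketch():
    args = PlotArguments(csv_file="results.csv", is_time=True, figure_png_file="plot.png")
    Plot(args=args).plot()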
import os
from datetime import datetime as dt

from github import Github


LABELS_TO_EXEMPT = [
    "good first issue",
    "feature request",
    "wip",
]


def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/accelerate")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and days_since_updated > 7
            and days_since_creation >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Close issue since it has been 7 days of inactivity since bot mention.
            issue.edit(state="closed")
        elif (
            days_since_updated > 23
            and days_since_creation >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Add stale comment
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )


if __name__ == "__main__":
    main()
import collections from typing import List, Optional, Union from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging from ..bert.tokenization_bert_fast import BertTokenizerFast from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer lowerCAmelCase = logging.get_logger(__name__) lowerCAmelCase = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''} lowerCAmelCase = { '''vocab_file''': { '''facebook/dpr-ctx_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt''' ), '''facebook/dpr-ctx_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''facebook/dpr-ctx_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json''' ), '''facebook/dpr-ctx_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json''' ), }, } lowerCAmelCase = { '''vocab_file''': { '''facebook/dpr-question_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt''' ), '''facebook/dpr-question_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''facebook/dpr-question_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json''' ), '''facebook/dpr-question_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json''' ), }, } lowerCAmelCase = { '''vocab_file''': { '''facebook/dpr-reader-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt''' ), '''facebook/dpr-reader-multiset-base''': ( '''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''facebook/dpr-reader-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json''' ), '''facebook/dpr-reader-multiset-base''': ( '''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json''' ), }, } lowerCAmelCase = { '''facebook/dpr-ctx_encoder-single-nq-base''': 5_1_2, '''facebook/dpr-ctx_encoder-multiset-base''': 5_1_2, } lowerCAmelCase = { '''facebook/dpr-question_encoder-single-nq-base''': 5_1_2, '''facebook/dpr-question_encoder-multiset-base''': 5_1_2, } lowerCAmelCase = { '''facebook/dpr-reader-single-nq-base''': 5_1_2, '''facebook/dpr-reader-multiset-base''': 5_1_2, } lowerCAmelCase = { '''facebook/dpr-ctx_encoder-single-nq-base''': {'''do_lower_case''': True}, '''facebook/dpr-ctx_encoder-multiset-base''': {'''do_lower_case''': True}, } lowerCAmelCase = { '''facebook/dpr-question_encoder-single-nq-base''': {'''do_lower_case''': True}, '''facebook/dpr-question_encoder-multiset-base''': {'''do_lower_case''': True}, } lowerCAmelCase = { '''facebook/dpr-reader-single-nq-base''': {'''do_lower_case''': True}, '''facebook/dpr-reader-multiset-base''': {'''do_lower_case''': True}, } class A ( A_ ): UpperCamelCase_ : List[Any] =VOCAB_FILES_NAMES UpperCamelCase_ : Dict =CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP UpperCamelCase_ : List[Any] 
=CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase_ : Optional[int] =CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION UpperCamelCase_ : int =DPRContextEncoderTokenizer class A ( A_ ): UpperCamelCase_ : Any =VOCAB_FILES_NAMES UpperCamelCase_ : List[str] =QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP UpperCamelCase_ : Optional[Any] =QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase_ : Optional[Any] =QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION UpperCamelCase_ : List[Any] =DPRQuestionEncoderTokenizer lowerCAmelCase = collections.namedtuple( '''DPRSpanPrediction''', ['''span_score''', '''relevance_score''', '''doc_id''', '''start_index''', '''end_index''', '''text'''] ) lowerCAmelCase = collections.namedtuple('''DPRReaderOutput''', ['''start_logits''', '''end_logits''', '''relevance_logits''']) lowerCAmelCase = R''' Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`. It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers), using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)` with the format: [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids> Args: questions (`str` or `List[str]`): The questions to be encoded. You can specify one question for many passages. In this case, the question will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in `titles` or `texts`. titles (`str` or `List[str]`): The passages titles to be encoded. This can be a string or a list of strings if there are several passages. texts (`str` or `List[str]`): The passages texts to be encoded. This can be a string or a list of strings if there are several passages. padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`): Activates and controls padding. Accepts the following values: - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different lengths). truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`): Activates and controls truncation. Accepts the following values: - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided. - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided. 
- `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size). max_length (`int`, *optional*): Controls the maximum length to use by one of the truncation/padding parameters. If left unset or set to `None`, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated. return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors instead of list of python integers. Acceptable values are: - `\'tf\'`: Return TensorFlow `tf.constant` objects. - `\'pt\'`: Return PyTorch `torch.Tensor` objects. - `\'np\'`: Return Numpy `np.ndarray` objects. return_attention_mask (`bool`, *optional*): Whether or not to return the attention mask. If not set, will return the attention mask according to the specific tokenizer\'s default, defined by the `return_outputs` attribute. [What are attention masks?](../glossary#attention-mask) Return: `Dict[str, List[List[int]]]`: A dictionary with the following keys: - `input_ids`: List of token ids to be fed to a model. - `attention_mask`: List of indices specifying which tokens should be attended to by the model. ''' @add_start_docstrings(A_ ) class A : def __call__(self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = False , lowerCAmelCase = False , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , **lowerCAmelCase , ): if titles is None and texts is None: return super().__call__( lowerCAmelCase , padding=lowerCAmelCase , truncation=lowerCAmelCase , max_length=lowerCAmelCase , return_tensors=lowerCAmelCase , return_attention_mask=lowerCAmelCase , **lowerCAmelCase , ) elif titles is None or texts is None: __lowercase= titles if texts is None else texts return super().__call__( lowerCAmelCase , lowerCAmelCase , padding=lowerCAmelCase , truncation=lowerCAmelCase , max_length=lowerCAmelCase , return_tensors=lowerCAmelCase , return_attention_mask=lowerCAmelCase , **lowerCAmelCase , ) __lowercase= titles if not isinstance(lowerCAmelCase , lowerCAmelCase ) else [titles] __lowercase= texts if not isinstance(lowerCAmelCase , lowerCAmelCase ) else [texts] __lowercase= len(lowerCAmelCase ) __lowercase= questions if not isinstance(lowerCAmelCase , lowerCAmelCase ) else [questions] * n_passages assert len(lowerCAmelCase ) == len( lowerCAmelCase ), f'There should be as many titles than texts but got {len(lowerCAmelCase )} titles and {len(lowerCAmelCase )} texts.' 
__lowercase= super().__call__(lowerCAmelCase , lowerCAmelCase , padding=lowerCAmelCase , truncation=lowerCAmelCase )['input_ids'] __lowercase= super().__call__(lowerCAmelCase , add_special_tokens=lowerCAmelCase , padding=lowerCAmelCase , truncation=lowerCAmelCase )['input_ids'] __lowercase= { 'input_ids': [ (encoded_question_and_title + encoded_text)[:max_length] if max_length is not None and truncation else encoded_question_and_title + encoded_text for encoded_question_and_title, encoded_text in zip(lowerCAmelCase , lowerCAmelCase ) ] } if return_attention_mask is not False: __lowercase= [] for input_ids in encoded_inputs["input_ids"]: attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] ) __lowercase= attention_mask return self.pad(lowerCAmelCase , padding=lowerCAmelCase , max_length=lowerCAmelCase , return_tensors=lowerCAmelCase ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = 1_6 , lowerCAmelCase = 6_4 , lowerCAmelCase = 4 , ): __lowercase= reader_input['input_ids'] __lowercase, __lowercase, __lowercase= reader_output[:3] __lowercase= len(lowerCAmelCase ) __lowercase= sorted(range(lowerCAmelCase ) , reverse=lowerCAmelCase , key=relevance_logits.__getitem__ ) __lowercase= [] for doc_id in sorted_docs: __lowercase= list(input_ids[doc_id] ) # assuming question & title information is at the beginning of the sequence __lowercase= sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id if sequence_ids[-1] == self.pad_token_id: __lowercase= sequence_ids.index(self.pad_token_id ) else: __lowercase= len(lowerCAmelCase ) __lowercase= self._get_best_spans( start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=lowerCAmelCase , top_spans=lowerCAmelCase , ) for start_index, end_index in best_spans: start_index += passage_offset end_index += passage_offset nbest_spans_predictions.append( DPRSpanPrediction( span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=lowerCAmelCase , start_index=lowerCAmelCase , end_index=lowerCAmelCase , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) ) if len(lowerCAmelCase ) >= num_spans: break return nbest_spans_predictions[:num_spans] def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ): __lowercase= [] for start_index, start_score in enumerate(lowerCAmelCase ): for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ): scores.append(((start_index, start_index + answer_length), start_score + end_score) ) __lowercase= sorted(lowerCAmelCase , key=lambda lowerCAmelCase : x[1] , reverse=lowerCAmelCase ) __lowercase= [] for (start_index, end_index), score in scores: assert start_index <= end_index, f'Wrong span indices: [{start_index}:{end_index}]' __lowercase= end_index - start_index + 1 assert length <= max_answer_length, f'Span is too long: {length} > {max_answer_length}' if any( start_index <= prev_start_index <= prev_end_index <= end_index or prev_start_index <= start_index <= end_index <= prev_end_index for (prev_start_index, prev_end_index) in chosen_span_intervals ): continue chosen_span_intervals.append((start_index, end_index) ) if len(lowerCAmelCase ) == top_spans: break return chosen_span_intervals @add_end_docstrings(A_ ) class A ( A_ , A_ ): UpperCamelCase_ : Optional[int] =VOCAB_FILES_NAMES UpperCamelCase_ : List[str] 
=READER_PRETRAINED_VOCAB_FILES_MAP UpperCamelCase_ : Dict =READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase_ : Optional[Any] =READER_PRETRAINED_INIT_CONFIGURATION UpperCamelCase_ : Union[str, Any] =['''input_ids''', '''attention_mask'''] UpperCamelCase_ : Dict =DPRReaderTokenizer
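# Usage sketch for the reader tokenizer defined above. In the released library this
# class is exported as DPRReaderTokenizerFast; the checkpoint name comes from the URL
# maps earlier in this file, and the question/title/text strings are invented for
# illustration.
def _dpr_reader_usage_sketch():
    from transformers import DPRReaderTokenizerFast

    tokenizer = DPRReaderTokenizerFast.from_pretrained("facebook/dpr-reader-single-nq-base")
    return tokenizer(
        questions=["What is love?"],
        titles=["Haddaway"],
        texts=["'What Is Love' is a song recorded by the artist Haddaway"],
        return_tensors="pt",
    )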
import unittest

import numpy as np


def schur_complement(mat_a, mat_b, mat_c, pseudo_inv=None) -> np.ndarray:
    """Compute the Schur complement S = C - B^T A^{-1} B of the block matrix [[A, B], [B^T, C]]."""
    shape_a = np.shape(mat_a)
    shape_b = np.shape(mat_b)
    shape_c = np.shape(mat_c)

    if shape_a[0] != shape_b[0]:
        msg = (
            "Expected the same number of rows for A and B. "
            f"Instead found A of size {shape_a} and B of size {shape_b}"
        )
        raise ValueError(msg)

    if shape_b[1] != shape_c[1]:
        msg = (
            "Expected the same number of columns for B and C. "
            f"Instead found B of size {shape_b} and C of size {shape_c}"
        )
        raise ValueError(msg)

    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a)
        except np.linalg.LinAlgError:
            raise ValueError("Input matrix A is not invertible. Cannot compute Schur complement.")

    return mat_c - mat_b.T @ a_inv @ mat_b


class TestSchurComplement(unittest.TestCase):
    def test_schur_complement(self):
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        s = schur_complement(a, b, c)

        input_matrix = np.block([[a, b], [b.T, c]])

        det_x = np.linalg.det(input_matrix)
        det_a = np.linalg.det(a)
        det_s = np.linalg.det(s)

        self.assertAlmostEqual(det_x, det_a * det_s)

    def test_improper_a_b_dimensions(self):
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])

        with self.assertRaises(ValueError):
            schur_complement(b, a, c)  # A and B deliberately swapped to violate the shape checks

    def test_improper_b_c_dimensions(self):
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1, 3], [6, 3, 5]])  # wrong column count relative to B

        with self.assertRaises(ValueError):
            schur_complement(a, b, c)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    unittest.main()
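# The first test above relies on the block-determinant identity
#     det([[A, B], [B^T, C]]) = det(A) * det(S),   S = C - B^T A^{-1} B,
# which holds whenever A is invertible. A standalone check on a random
# well-conditioned block matrix (illustrative only):
def _schur_identity_sketch():
    rng = np.random.default_rng(0)
    a = rng.normal(size=(3, 3))
    a = a @ a.T + 3 * np.eye(3)  # symmetric positive definite, hence invertible
    b = rng.normal(size=(3, 2))
    c = rng.normal(size=(2, 2))
    c = c @ c.T + np.eye(2)
    m = np.block([[a, b], [b.T, c]])
    s = schur_complement(a, b, c)
    assert np.isclose(np.linalg.det(m), np.linalg.det(a) * np.linalg.det(s))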
import inspect import unittest import torch import torch.nn as nn from accelerate.hooks import ( AlignDevicesHook, ModelHook, SequentialHook, add_hook_to_module, attach_align_device_hook, remove_hook_from_module, remove_hook_from_submodules, ) from accelerate.test_utils import require_multi_gpu class A ( nn.Module ): def __init__(self ): super().__init__() __lowercase= nn.Linear(3 , 4 ) __lowercase= nn.BatchNormad(4 ) __lowercase= nn.Linear(4 , 5 ) def _A (self , lowerCAmelCase ): return self.lineara(self.batchnorm(self.lineara(lowerCAmelCase ) ) ) class A ( A_ ): def _A (self , lowerCAmelCase , *lowerCAmelCase , **lowerCAmelCase ): return (args[0] + 1,) + args[1:], kwargs class A ( A_ ): def _A (self , lowerCAmelCase , lowerCAmelCase ): return output + 1 class A ( unittest.TestCase ): def _A (self ): __lowercase= ModelForTest() __lowercase= ModelHook() add_hook_to_module(lowerCAmelCase , lowerCAmelCase ) self.assertEqual(test_model._hf_hook , lowerCAmelCase ) self.assertTrue(hasattr(lowerCAmelCase , '_old_forward' ) ) # Check adding the hook did not change the name or the signature self.assertEqual(test_model.forward.__name__ , 'forward' ) self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ['x'] ) remove_hook_from_module(lowerCAmelCase ) self.assertFalse(hasattr(lowerCAmelCase , '_hf_hook' ) ) self.assertFalse(hasattr(lowerCAmelCase , '_old_forward' ) ) def _A (self ): __lowercase= ModelForTest() __lowercase= ModelHook() add_hook_to_module(lowerCAmelCase , lowerCAmelCase ) add_hook_to_module(lowerCAmelCase , lowerCAmelCase , append=lowerCAmelCase ) self.assertEqual(isinstance(test_model._hf_hook , lowerCAmelCase ) , lowerCAmelCase ) self.assertEqual(len(test_model._hf_hook.hooks ) , 2 ) self.assertTrue(hasattr(lowerCAmelCase , '_old_forward' ) ) # Check adding the hook did not change the name or the signature self.assertEqual(test_model.forward.__name__ , 'forward' ) self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ['x'] ) remove_hook_from_module(lowerCAmelCase ) self.assertFalse(hasattr(lowerCAmelCase , '_hf_hook' ) ) self.assertFalse(hasattr(lowerCAmelCase , '_old_forward' ) ) def _A (self ): __lowercase= ModelForTest() __lowercase= torch.randn(2 , 3 ) __lowercase= test_model(x + 1 ) __lowercase= test_model(x + 2 ) __lowercase= PreForwardHook() add_hook_to_module(lowerCAmelCase , lowerCAmelCase ) __lowercase= test_model(lowerCAmelCase ) self.assertTrue(torch.allclose(lowerCAmelCase , lowerCAmelCase , atol=1E-5 ) ) # Attaching a hook to a model when it already has one replaces, does not chain __lowercase= PreForwardHook() add_hook_to_module(lowerCAmelCase , lowerCAmelCase ) __lowercase= test_model(lowerCAmelCase ) self.assertTrue(torch.allclose(lowerCAmelCase , lowerCAmelCase , atol=1E-5 ) ) # You need to use the sequential hook to chain two or more hooks __lowercase= SequentialHook(PreForwardHook() , PreForwardHook() ) add_hook_to_module(lowerCAmelCase , lowerCAmelCase ) __lowercase= test_model(lowerCAmelCase ) assert torch.allclose(lowerCAmelCase , lowerCAmelCase , atol=1E-5 ) def _A (self ): __lowercase= ModelForTest() __lowercase= torch.randn(2 , 3 ) __lowercase= test_model(lowerCAmelCase ) __lowercase= PostForwardHook() add_hook_to_module(lowerCAmelCase , lowerCAmelCase ) __lowercase= test_model(lowerCAmelCase ) self.assertTrue(torch.allclose(lowerCAmelCase , output + 1 , atol=1E-5 ) ) # Attaching a hook to a model when it already has one replaces, does not chain __lowercase= PostForwardHook() 
add_hook_to_module(lowerCAmelCase , lowerCAmelCase ) __lowercase= test_model(lowerCAmelCase ) self.assertTrue(torch.allclose(lowerCAmelCase , output + 1 , atol=1E-5 ) ) # You need to use the sequential hook to chain two or more hooks __lowercase= SequentialHook(PostForwardHook() , PostForwardHook() ) add_hook_to_module(lowerCAmelCase , lowerCAmelCase ) __lowercase= test_model(lowerCAmelCase ) assert torch.allclose(lowerCAmelCase , output + 2 , atol=1E-5 ) def _A (self ): __lowercase= ModelForTest() __lowercase= torch.randn(2 , 3 ) __lowercase= test_model(lowerCAmelCase ) __lowercase= PostForwardHook() add_hook_to_module(lowerCAmelCase , lowerCAmelCase ) __lowercase= test_model(lowerCAmelCase ) self.assertTrue(torch.allclose(lowerCAmelCase , output + 1 ) ) self.assertTrue(outputa.requires_grad ) __lowercase= True __lowercase= test_model(lowerCAmelCase ) self.assertFalse(outputa.requires_grad ) @require_multi_gpu def _A (self ): __lowercase= ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) # This will move each submodule on different devices add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) ) add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) ) add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) ) self.assertEqual(model.lineara.weight.device , torch.device(0 ) ) self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) ) self.assertEqual(model.lineara.weight.device , torch.device(1 ) ) # We can still make a forward pass. The input does not need to be on any particular device __lowercase= torch.randn(2 , 3 ) __lowercase= model(lowerCAmelCase ) self.assertEqual(output.device , torch.device(1 ) ) # We can add a general hook to put back output on same device as input. 
add_hook_to_module(lowerCAmelCase , AlignDevicesHook(io_same_device=lowerCAmelCase ) ) __lowercase= torch.randn(2 , 3 ).to(0 ) __lowercase= model(lowerCAmelCase ) self.assertEqual(output.device , torch.device(0 ) ) def _A (self ): __lowercase= ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) # This will move each submodule on different devices __lowercase= {'execution_device': 0 if torch.cuda.is_available() else 'cpu', 'offload': True} add_hook_to_module(model.lineara , AlignDevicesHook(**lowerCAmelCase ) ) add_hook_to_module(model.batchnorm , AlignDevicesHook(**lowerCAmelCase ) ) add_hook_to_module(model.lineara , AlignDevicesHook(**lowerCAmelCase ) ) # Parameters have been offloaded, so on the meta device self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) ) self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) # Buffers are not included in the offload by default, so are on the execution device __lowercase= torch.device(hook_kwargs['execution_device'] ) self.assertEqual(model.batchnorm.running_mean.device , lowerCAmelCase ) __lowercase= torch.randn(2 , 3 ) __lowercase= model(lowerCAmelCase ) self.assertEqual(output.device , lowerCAmelCase ) # Removing hooks loads back the weights in the model. remove_hook_from_module(model.lineara ) remove_hook_from_module(model.batchnorm ) remove_hook_from_module(model.lineara ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) # Now test with buffers included in the offload __lowercase= { 'execution_device': 0 if torch.cuda.is_available() else 'cpu', 'offload': True, 'offload_buffers': True, } add_hook_to_module(model.lineara , AlignDevicesHook(**lowerCAmelCase ) ) add_hook_to_module(model.batchnorm , AlignDevicesHook(**lowerCAmelCase ) ) add_hook_to_module(model.lineara , AlignDevicesHook(**lowerCAmelCase ) ) # Parameters have been offloaded, so on the meta device, buffers included self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) ) self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device('meta' ) ) __lowercase= torch.randn(2 , 3 ) __lowercase= model(lowerCAmelCase ) self.assertEqual(output.device , lowerCAmelCase ) # Removing hooks loads back the weights in the model. 
remove_hook_from_module(model.lineara ) remove_hook_from_module(model.batchnorm ) remove_hook_from_module(model.lineara ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) def _A (self ): __lowercase= ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) # This will move each submodule on different devices __lowercase= 0 if torch.cuda.is_available() else 'cpu' attach_align_device_hook(lowerCAmelCase , execution_device=lowerCAmelCase , offload=lowerCAmelCase ) # Parameters have been offloaded, so on the meta device self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) ) self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) # Buffers are not included in the offload by default, so are on the execution device __lowercase= torch.device(lowerCAmelCase ) self.assertEqual(model.batchnorm.running_mean.device , lowerCAmelCase ) __lowercase= torch.randn(2 , 3 ) __lowercase= model(lowerCAmelCase ) self.assertEqual(output.device , lowerCAmelCase ) # Removing hooks loads back the weights in the model. remove_hook_from_submodules(lowerCAmelCase ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) # Now test with buffers included in the offload attach_align_device_hook(lowerCAmelCase , execution_device=lowerCAmelCase , offload=lowerCAmelCase , offload_buffers=lowerCAmelCase ) # Parameters have been offloaded, so on the meta device, buffers included self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) ) self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device('meta' ) ) __lowercase= torch.randn(2 , 3 ) __lowercase= model(lowerCAmelCase ) self.assertEqual(output.device , lowerCAmelCase ) # Removing hooks loads back the weights in the model. 
remove_hook_from_submodules(lowerCAmelCase ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) def _A (self ): __lowercase= ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) # This will move each submodule on different devices __lowercase= 0 if torch.cuda.is_available() else 'cpu' attach_align_device_hook( lowerCAmelCase , execution_device=lowerCAmelCase , offload=lowerCAmelCase , weights_map=model.state_dict() ) # Parameters have been offloaded, so on the meta device self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) ) self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) # Buffers are not included in the offload by default, so are on the execution device __lowercase= torch.device(lowerCAmelCase ) self.assertEqual(model.batchnorm.running_mean.device , lowerCAmelCase ) __lowercase= torch.randn(2 , 3 ) __lowercase= model(lowerCAmelCase ) self.assertEqual(output.device , lowerCAmelCase ) # Removing hooks loads back the weights in the model. remove_hook_from_submodules(lowerCAmelCase ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) # Now test with buffers included in the offload attach_align_device_hook( lowerCAmelCase , execution_device=lowerCAmelCase , offload=lowerCAmelCase , weights_map=model.state_dict() , offload_buffers=lowerCAmelCase , ) # Parameters have been offloaded, so on the meta device, buffers included self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) ) self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device('meta' ) ) __lowercase= torch.randn(2 , 3 ) __lowercase= model(lowerCAmelCase ) self.assertEqual(output.device , lowerCAmelCase ) # Removing hooks loads back the weights in the model. remove_hook_from_submodules(lowerCAmelCase ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
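# For orientation, a minimal sketch of the hook API these tests exercise. The method
# names (pre_forward/post_forward) follow accelerate.hooks.ModelHook; the doubling hook
# itself is invented for illustration.
def _custom_hook_sketch():
    import torch.nn as nn
    from accelerate.hooks import ModelHook, add_hook_to_module, remove_hook_from_module

    class ScaleOutputHook(ModelHook):
        def post_forward(self, module, output):
            return output * 2  # runs on every forward pass, after the wrapped module

    layer = nn.Linear(3, 4)
    add_hook_to_module(layer, ScaleOutputHook())  # wraps layer.forward in place
    remove_hook_from_module(layer)  # restores the original forward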
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json",
    "kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json",
    "kssteven/ibert-roberta-large-mnli": (
        "https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"
    ),
}


class IBertConfig(PretrainedConfig):
    model_type = "ibert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        quant_mode=False,
        force_dequant="none",
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant


class IBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
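# Usage sketch: this config is what switches I-BERT into integer-only mode (quant_mode)
# and lets individual ops fall back to FP32 (force_dequant). The model class is exported
# from transformers as IBertModel; the checkpoint name comes from the archive map above.
def _ibert_config_sketch():
    from transformers import IBertConfig, IBertModel

    config = IBertConfig(quant_mode=True)  # enable integer-only quantization
    model = IBertModel(config)
    return model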
import os import unittest from huggingface_hub.utils import are_progress_bars_disabled import transformers.models.bart.tokenization_bart from transformers import logging from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context from transformers.utils.logging import disable_progress_bar, enable_progress_bar class A ( unittest.TestCase ): def _A (self ): __lowercase= logging.get_logger() # the current default level is logging.WARNING __lowercase= logging.get_verbosity() logging.set_verbosity_error() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) logging.set_verbosity_warning() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) logging.set_verbosity_info() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) logging.set_verbosity_debug() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) # restore to the original level logging.set_verbosity(lowerCAmelCase ) def _A (self ): __lowercase= logging.get_verbosity() __lowercase= logging.get_logger('transformers.models.bart.tokenization_bart' ) __lowercase= 'Testing 1, 2, 3' # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`) if level_origin <= logging.WARNING: with CaptureLogger(lowerCAmelCase ) as cl: logger.warning(lowerCAmelCase ) self.assertEqual(cl.out , msg + '\n' ) # this is setting the level for all of `transformers.*` loggers logging.set_verbosity_error() # should not be able to log warnings with CaptureLogger(lowerCAmelCase ) as cl: logger.warning(lowerCAmelCase ) self.assertEqual(cl.out , '' ) # should be able to log warnings again logging.set_verbosity_warning() with CaptureLogger(lowerCAmelCase ) as cl: logger.warning(lowerCAmelCase ) self.assertEqual(cl.out , msg + '\n' ) # restore to the original level logging.set_verbosity(lowerCAmelCase ) @mockenv(TRANSFORMERS_VERBOSITY='error' ) def _A (self ): # reset for the env var to take effect, next time some logger call is made transformers.utils.logging._reset_library_root_logger() # this action activates the env var __lowercase= logging.get_logger('transformers.models.bart.tokenization_bart' ) __lowercase= os.getenv('TRANSFORMERS_VERBOSITY' , lowerCAmelCase ) __lowercase= logging.log_levels[env_level_str] __lowercase= logging.get_verbosity() self.assertEqual( lowerCAmelCase , lowerCAmelCase , f'TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}' , ) # restore to the original level __lowercase= '' transformers.utils.logging._reset_library_root_logger() @mockenv(TRANSFORMERS_VERBOSITY='super-error' ) def _A (self ): # reset for the env var to take effect, next time some logger call is made transformers.utils.logging._reset_library_root_logger() __lowercase= logging.logging.getLogger() with CaptureLogger(lowerCAmelCase ) as cl: # this action activates the env var logging.get_logger('transformers.models.bart.tokenization_bart' ) self.assertIn('Unknown option TRANSFORMERS_VERBOSITY=super-error' , cl.out ) # no need to restore as nothing was changed def _A (self ): # testing `logger.warning_advice()` transformers.utils.logging._reset_library_root_logger() __lowercase= logging.get_logger('transformers.models.bart.tokenization_bart' ) __lowercase= 'Testing 1, 2, 3' with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='1' ): # nothing should be logged as env var disables this method with CaptureLogger(lowerCAmelCase ) as cl: logger.warning_advice(lowerCAmelCase ) self.assertEqual(cl.out , '' ) with 
mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='' ): # should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset with CaptureLogger(lowerCAmelCase ) as cl: logger.warning_advice(lowerCAmelCase ) self.assertEqual(cl.out , msg + '\n' ) def _lowerCamelCase( ) -> Optional[int]: '''simple docstring''' disable_progress_bar() assert are_progress_bars_disabled() enable_progress_bar() assert not are_progress_bars_disabled()
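# The verbosity API exercised above, condensed (all names exist in transformers.utils.logging):
def _verbosity_sketch():
    from transformers.utils import logging

    logging.set_verbosity_info()  # or set_verbosity_error() / set_verbosity_warning() / set_verbosity_debug()
    logger = logging.get_logger("transformers.models.bart.tokenization_bart")
    logger.info("This message respects the verbosity level set above.")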
from typing import List, Optional, Tuple, Union

import torch

from ...models import UNet2DModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class KarrasVePipeline(DiffusionPipeline):
    unet: UNet2DModel
    scheduler: KarrasVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: KarrasVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 50,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        # sample x_0 ~ N(0, sigma_0^2 * I)
        sample = randn_tensor(shape, generator=generator, device=self.device) * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # here sigma_t == t_i from the paper
            sigma = self.scheduler.schedule[t]
            sigma_prev = self.scheduler.schedule[t - 1] if t > 0 else 0

            # 1. Select temporarily increased noise level sigma_hat
            # 2. Add new noise to move from sample_i to sample_hat
            sample_hat, sigma_hat = self.scheduler.add_noise_to_input(sample, sigma, generator=generator)

            # 3. Predict the noise residual given the noise magnitude `sigma_hat`
            # The model inputs and output are adjusted by following eq. (213) in [1].
            model_output = (sigma_hat / 2) * model((sample_hat + 1) / 2, sigma_hat / 2).sample

            # 4. Evaluate dx/dt at sigma_hat
            # 5. Take Euler step from sigma to sigma_prev
            step_output = self.scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat)

            if sigma_prev != 0:
                # 6. Apply 2nd order correction
                # The model inputs and output are adjusted by following eq. (213) in [1].
                model_output = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2, sigma_prev / 2).sample
                step_output = self.scheduler.step_correct(
                    model_output,
                    sigma_hat,
                    sigma_prev,
                    sample_hat,
                    step_output.prev_sample,
                    step_output["derivative"],
                )
            sample = step_output.prev_sample

        sample = (sample / 2 + 0.5).clamp(0, 1)
        image = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
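# Usage sketch for the pipeline above. The checkpoint name is the NCSN++ model commonly
# paired with KarrasVeScheduler in diffusers examples and is an assumption here, not
# something this file guarantees.
def _karras_ve_sketch():
    import torch
    from diffusers import KarrasVePipeline

    pipe = KarrasVePipeline.from_pretrained("google/ncsnpp-celebahq-256")
    image = pipe(batch_size=1, num_inference_steps=50, generator=torch.manual_seed(0)).images[0]
    image.save("karras_ve_sample.png")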
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging lowerCAmelCase = '''▁''' lowerCAmelCase = {'''vocab_file''': '''spiece.model'''} lowerCAmelCase = { '''vocab_file''': {'''google/pegasus-xsum''': '''https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'''} } lowerCAmelCase = { '''google/pegasus-xsum''': 5_1_2, } lowerCAmelCase = logging.get_logger(__name__) class A ( A_ ): UpperCamelCase_ : Union[str, Any] =VOCAB_FILES_NAMES UpperCamelCase_ : List[Any] =VOCAB_FILES_NAMES UpperCamelCase_ : int =PRETRAINED_VOCAB_FILES_MAP UpperCamelCase_ : Tuple =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase_ : int =['''input_ids''', '''attention_mask'''] def __init__(self , lowerCAmelCase , lowerCAmelCase="<pad>" , lowerCAmelCase="</s>" , lowerCAmelCase="<unk>" , lowerCAmelCase="<mask_2>" , lowerCAmelCase="<mask_1>" , lowerCAmelCase=None , lowerCAmelCase=1_0_3 , lowerCAmelCase = None , **lowerCAmelCase , ): __lowercase= offset if additional_special_tokens is not None: if not isinstance(lowerCAmelCase , lowerCAmelCase ): raise TypeError( f'additional_special_tokens should be of type {type(lowerCAmelCase )}, but is' f' {type(lowerCAmelCase )}' ) __lowercase= ( ([mask_token_sent] + additional_special_tokens) if mask_token_sent not in additional_special_tokens and mask_token_sent is not None else additional_special_tokens ) # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken additional_special_tokens_extended += [ f'<unk_{i}>' for i in range(len(lowerCAmelCase ) , self.offset - 1 ) ] if len(set(lowerCAmelCase ) ) != len(lowerCAmelCase ): raise ValueError( 'Please make sure that the provided additional_special_tokens do not contain an incorrectly' f' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.' 
) __lowercase= additional_special_tokens_extended else: __lowercase= [mask_token_sent] if mask_token_sent is not None else [] additional_special_tokens += [f'<unk_{i}>' for i in range(2 , self.offset )] __lowercase= {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( eos_token=lowerCAmelCase , unk_token=lowerCAmelCase , mask_token=lowerCAmelCase , pad_token=lowerCAmelCase , mask_token_sent=lowerCAmelCase , offset=lowerCAmelCase , additional_special_tokens=lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **lowerCAmelCase , ) __lowercase= mask_token_sent __lowercase= vocab_file __lowercase= spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(lowerCAmelCase ) # add special tokens to encoder dict __lowercase= { 0: self.pad_token, 1: self.eos_token, } if self.mask_token_sent is not None: self.encoder.update( { 2: self.mask_token_sent, 3: self.mask_token, } ) if self.offset > 0: # entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102 # mask_token_sent is already added to list -> so start at 1 self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1 , self.offset - 1 )} ) __lowercase= {v: k for k, v in self.encoder.items()} @property def _A (self ): return len(self.sp_model ) + self.offset def _A (self ): __lowercase= {self.convert_ids_to_tokens(lowerCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__(self ): __lowercase= self.__dict__.copy() __lowercase= None return state def __setstate__(self , lowerCAmelCase ): __lowercase= d # for backward compatibility if not hasattr(self , 'sp_model_kwargs' ): __lowercase= {} __lowercase= spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def _A (self , lowerCAmelCase ): return self.sp_model.encode(lowerCAmelCase , out_type=lowerCAmelCase ) def _A (self , lowerCAmelCase ): if token in self.decoder: return self.decoder[token] elif token in self.added_tokens_decoder: return self.added_tokens_decoder[token] __lowercase= self.sp_model.piece_to_id(lowerCAmelCase ) return sp_id + self.offset def _A (self , lowerCAmelCase ): if index in self.encoder: return self.encoder[index] elif index in self.added_tokens_encoder: return self.added_tokens_encoder[index] else: __lowercase= self.sp_model.IdToPiece(index - self.offset ) return token def _A (self , lowerCAmelCase ): __lowercase= [] __lowercase= '' for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(lowerCAmelCase ) + token __lowercase= [] else: current_sub_tokens.append(lowerCAmelCase ) out_string += self.sp_model.decode(lowerCAmelCase ) return out_string.strip() def _A (self , lowerCAmelCase=False ): return 1 def _A (self , lowerCAmelCase ): __lowercase= set(self.all_special_ids ) # call it once instead of inside list comp all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special return [1 if x in all_special_ids else 0 for x in seq] def _A (self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = False ): if already_has_special_tokens: return self._special_token_mask(lowerCAmelCase ) elif token_ids_a is None: return self._special_token_mask(lowerCAmelCase ) + [1] else: return self._special_token_mask(token_ids_a + token_ids_a ) + [1] def _A (self , lowerCAmelCase , lowerCAmelCase=None ): if token_ids_a is None: return token_ids_a + [self.eos_token_id] # We don't expect to 
process pairs, but leave the pair logic for API consistency return token_ids_a + token_ids_a + [self.eos_token_id] def _A (self , lowerCAmelCase , lowerCAmelCase = None ): if not os.path.isdir(lowerCAmelCase ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return __lowercase= os.path.join( lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , lowerCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(lowerCAmelCase , 'wb' ) as fi: __lowercase= self.sp_model.serialized_model_proto() fi.write(lowerCAmelCase ) return (out_vocab_file,)
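# Usage sketch; "google/pegasus-xsum" is the checkpoint listed in the vocab map above.
# In the released library this slow tokenizer is exported as PegasusTokenizer.
def _pegasus_tokenizer_sketch():
    from transformers import PegasusTokenizer

    tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")
    batch = tokenizer(["PEGASUS is pre-trained with gap sentence generation."], return_tensors="pt")
    return tokenizer.decode(batch["input_ids"][0])  # round-trips through the SentencePiece model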
def euclidean_distance_sqr(point1, point2):
    return (point1[0] - point2[0]) ** 2 + (point1[1] - point2[1]) ** 2


def column_based_sort(array, column=0):
    return sorted(array, key=lambda x: x[column])


def dis_between_closest_pair(points, points_counts, min_dis=float("inf")):
    """Brute force: scan all O(n^2) pairs and return the smallest squared distance."""
    for i in range(points_counts - 1):
        for j in range(i + 1, points_counts):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def dis_between_closest_in_strip(points, points_counts, min_dis=float("inf")):
    """Closest pair inside the strip around the dividing line; each point is compared
    with at most 6 neighbors, so this is O(n)."""
    for i in range(min(6, points_counts - 1), points_counts):
        for j in range(max(0, i - 6), i):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts):
    # base case: brute force on small inputs
    if points_counts <= 3:
        return dis_between_closest_pair(points_sorted_on_x, points_counts)

    # recursion
    mid = points_counts // 2
    closest_in_left = closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y[:mid], mid)
    closest_in_right = closest_pair_of_points_sqr(
        points_sorted_on_x, points_sorted_on_y[mid:], points_counts - mid
    )
    closest_pair_dis = min(closest_in_left, closest_in_right)

    cross_strip = []
    for point in points_sorted_on_x:
        if abs(point[0] - points_sorted_on_x[mid][0]) < closest_pair_dis:
            cross_strip.append(point)

    closest_in_strip = dis_between_closest_in_strip(cross_strip, len(cross_strip), closest_pair_dis)
    return min(closest_pair_dis, closest_in_strip)


def closest_pair_of_points(points, points_counts):
    points_sorted_on_x = column_based_sort(points, column=0)
    points_sorted_on_y = column_based_sort(points, column=1)
    return (closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts)) ** 0.5


if __name__ == "__main__":
    points = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
    print("Distance:", closest_pair_of_points(points, len(points)))
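# Sanity-check sketch: the divide-and-conquer result should agree with the O(n^2)
# brute-force helper defined above (the random instance is illustrative):
def _closest_pair_cross_check():
    import random

    pts = [(random.uniform(0, 100), random.uniform(0, 100)) for _ in range(200)]
    fast = closest_pair_of_points(pts, len(pts))
    brute = dis_between_closest_pair(pts, len(pts)) ** 0.5
    assert abs(fast - brute) < 1e-9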
import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST, OpenAIGPTConfig, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification, OpenAIGPTLMHeadModel, OpenAIGPTModel, ) class A : def __init__(self , lowerCAmelCase , lowerCAmelCase=1_3 , lowerCAmelCase=7 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=9_9 , lowerCAmelCase=3_2 , lowerCAmelCase=5 , lowerCAmelCase=4 , lowerCAmelCase=3_7 , lowerCAmelCase="gelu" , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=5_1_2 , lowerCAmelCase=1_6 , lowerCAmelCase=2 , lowerCAmelCase=0.02 , lowerCAmelCase=3 , lowerCAmelCase=4 , lowerCAmelCase=None , ): __lowercase= parent __lowercase= batch_size __lowercase= seq_length __lowercase= is_training __lowercase= use_token_type_ids __lowercase= use_labels __lowercase= vocab_size __lowercase= hidden_size __lowercase= num_hidden_layers __lowercase= num_attention_heads __lowercase= intermediate_size __lowercase= hidden_act __lowercase= hidden_dropout_prob __lowercase= attention_probs_dropout_prob __lowercase= max_position_embeddings __lowercase= type_vocab_size __lowercase= type_sequence_label_size __lowercase= initializer_range __lowercase= num_labels __lowercase= num_choices __lowercase= scope __lowercase= self.vocab_size - 1 def _A (self ): __lowercase= ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowercase= None if self.use_token_type_ids: __lowercase= ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __lowercase= None __lowercase= None __lowercase= None if self.use_labels: __lowercase= ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowercase= ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __lowercase= ids_tensor([self.batch_size] , self.num_choices ) __lowercase= OpenAIGPTConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , ) __lowercase= ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 ) return ( config, input_ids, head_mask, token_type_ids, sequence_labels, token_labels, choice_labels, ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , *lowerCAmelCase ): __lowercase= OpenAIGPTModel(config=lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= model(lowerCAmelCase , token_type_ids=lowerCAmelCase , head_mask=lowerCAmelCase ) __lowercase= model(lowerCAmelCase , token_type_ids=lowerCAmelCase ) __lowercase= model(lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , *lowerCAmelCase ): __lowercase= OpenAIGPTLMHeadModel(lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= model(lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) 
) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , *lowerCAmelCase ): __lowercase= OpenAIGPTDoubleHeadsModel(lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= model(lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , *lowerCAmelCase ): __lowercase= self.num_labels __lowercase= OpenAIGPTForSequenceClassification(lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowercase= model(lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _A (self ): __lowercase= self.prepare_config_and_inputs() ( ( __lowercase ), ( __lowercase ), ( __lowercase ), ( __lowercase ), ( __lowercase ), ( __lowercase ), ( __lowercase ), )= config_and_inputs __lowercase= { 'input_ids': input_ids, 'token_type_ids': token_type_ids, 'head_mask': head_mask, } return config, inputs_dict @require_torch class A ( A_ , A_ , A_ , unittest.TestCase ): UpperCamelCase_ : Optional[Any] =( (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification) if is_torch_available() else () ) UpperCamelCase_ : Tuple =( (OpenAIGPTLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly UpperCamelCase_ : List[str] =( { '''feature-extraction''': OpenAIGPTModel, '''text-classification''': OpenAIGPTForSequenceClassification, '''text-generation''': OpenAIGPTLMHeadModel, '''zero-shot''': OpenAIGPTForSequenceClassification, } if is_torch_available() else {} ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests": # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers. # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a # tiny config could not be created. 
return True return False def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=False ): __lowercase= super()._prepare_for_class(lowerCAmelCase , lowerCAmelCase , return_labels=lowerCAmelCase ) if return_labels: if model_class.__name__ == "OpenAIGPTDoubleHeadsModel": __lowercase= torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=lowerCAmelCase , ) __lowercase= inputs_dict['labels'] __lowercase= inputs_dict['labels'] __lowercase= torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=lowerCAmelCase , ) __lowercase= torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase ) return inputs_dict def _A (self ): __lowercase= OpenAIGPTModelTester(self ) __lowercase= ConfigTester(self , config_class=lowerCAmelCase , n_embd=3_7 ) def _A (self ): self.config_tester.run_common_tests() def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_model(*lowerCAmelCase ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*lowerCAmelCase ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_double_lm_head_model(*lowerCAmelCase ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*lowerCAmelCase ) @slow def _A (self ): for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowercase= OpenAIGPTModel.from_pretrained(lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) @require_torch class A ( unittest.TestCase ): @slow def _A (self ): __lowercase= OpenAIGPTLMHeadModel.from_pretrained('openai-gpt' ) model.to(lowerCAmelCase ) __lowercase= torch.tensor([[4_8_1, 4_7_3_5, 5_4_4]] , dtype=torch.long , device=lowerCAmelCase ) # the president is __lowercase= [ 4_8_1, 4_7_3_5, 5_4_4, 2_4_6, 9_6_3, 8_7_0, 7_6_2, 2_3_9, 2_4_4, 4_0_4_7_7, 2_4_4, 2_4_9, 7_1_9, 8_8_1, 4_8_7, 5_4_4, 2_4_0, 2_4_4, 6_0_3, 4_8_1, ] # the president is a very good man. " \n " i\'m sure he is, " said the __lowercase= model.generate(lowerCAmelCase , do_sample=lowerCAmelCase ) self.assertListEqual(output_ids[0].tolist() , lowerCAmelCase )
295
1
from collections import namedtuple

import requests
from lxml import html  # type: ignore

covid_data = namedtuple("covid_data", "cases deaths recovered")


def covid_stats(url: str = "https://www.worldometers.info/coronavirus/") -> covid_data:
    """Scrape the live worldwide COVID-19 counters from worldometers.info."""
    xpath_str = '//div[@class = "maincounter-number"]/span/text()'
    return covid_data(*html.fromstring(requests.get(url).content).xpath(xpath_str))


fmt = """Total COVID-19 cases in the world: {}
Total deaths due to COVID-19 in the world: {}
Total COVID-19 patients recovered in the world: {}"""
print(fmt.format(*covid_stats()))
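# A hardened variant of the scraper above, sketched as an illustration (the
# function name covid_stats_checked is an assumption, not part of the original
# file): it adds a request timeout, surfaces HTTP errors, and sanity-checks
# that the page still exposes exactly the three counters the namedtuple expects.
def covid_stats_checked(url: str = "https://www.worldometers.info/coronavirus/") -> covid_data:
    response = requests.get(url, timeout=10)  # fail fast instead of hanging
    response.raise_for_status()  # surface HTTP errors explicitly
    values = html.fromstring(response.content).xpath(
        '//div[@class = "maincounter-number"]/span/text()'
    )
    if len(values) != 3:  # the page markup changed under us
        raise RuntimeError(f"expected 3 counters, got {len(values)}")
    return covid_data(*(v.strip() for v in values))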
295
from math import isqrt


def is_prime(number: int) -> bool:
    """Return True if ``number`` has no divisor in [2, isqrt(number)]."""
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))


def solution(max_prime: int = 10**6) -> int:
    """Count primes below ``max_prime`` that are a difference of consecutive cubes."""
    primes_count = 0
    cube_index = 1
    prime_candidate = 7
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)
        cube_index += 1
        prime_candidate += 6 * cube_index
    return primes_count


if __name__ == "__main__":
    print(f"{solution() = }")
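# Sanity-check sketch for the candidate generation above (the helper name is an
# assumption): the values 7, 19, 37, 61, ... are differences of consecutive
# cubes, and the incremental update `+= 6 * cube_index` matches the closed
# form (k + 1)**3 - k**3 = 3*k**2 + 3*k + 1.
def consecutive_cube_diffs(limit: int):
    cube_index, candidate = 1, 7
    while candidate < limit:
        # cross-check the running value against the closed form
        assert candidate == 3 * cube_index**2 + 3 * cube_index + 1
        yield candidate
        cube_index += 1
        candidate += 6 * cube_index


print(list(consecutive_cube_diffs(100)))  # [7, 19, 37, 61, 91]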
295
1
import tempfile import unittest from pathlib import Path from shutil import copyfile from transformers import BatchEncoding, MarianTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available if is_sentencepiece_available(): from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json from ...test_tokenization_common import TokenizerTesterMixin lowerCAmelCase = get_tests_dir('''fixtures/test_sentencepiece.model''') lowerCAmelCase = {'''target_lang''': '''fi''', '''source_lang''': '''en'''} lowerCAmelCase = '''>>zh<<''' lowerCAmelCase = '''Helsinki-NLP/''' if is_torch_available(): lowerCAmelCase = '''pt''' elif is_tf_available(): lowerCAmelCase = '''tf''' else: lowerCAmelCase = '''jax''' @require_sentencepiece class A ( A_ , unittest.TestCase ): UpperCamelCase_ : str =MarianTokenizer UpperCamelCase_ : Any =False UpperCamelCase_ : Optional[int] =True def _A (self ): super().setUp() __lowercase= ['</s>', '<unk>', '▁This', '▁is', '▁a', '▁t', 'est', '\u0120', '<pad>'] __lowercase= dict(zip(lowerCAmelCase , range(len(lowerCAmelCase ) ) ) ) __lowercase= Path(self.tmpdirname ) save_json(lowerCAmelCase , save_dir / VOCAB_FILES_NAMES['vocab'] ) save_json(lowerCAmelCase , save_dir / VOCAB_FILES_NAMES['tokenizer_config_file'] ) if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists(): copyfile(lowerCAmelCase , save_dir / VOCAB_FILES_NAMES['source_spm'] ) copyfile(lowerCAmelCase , save_dir / VOCAB_FILES_NAMES['target_spm'] ) __lowercase= MarianTokenizer.from_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname ) def _A (self , **lowerCAmelCase ): return MarianTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase ) def _A (self , lowerCAmelCase ): return ( "This is a test", "This is a test", ) def _A (self ): __lowercase= '</s>' __lowercase= 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase ) , lowerCAmelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase ) , lowerCAmelCase ) def _A (self ): __lowercase= list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '</s>' ) self.assertEqual(vocab_keys[1] , '<unk>' ) self.assertEqual(vocab_keys[-1] , '<pad>' ) self.assertEqual(len(lowerCAmelCase ) , 9 ) def _A (self ): self.assertEqual(self.get_tokenizer().vocab_size , 9 ) def _A (self ): __lowercase= MarianTokenizer.from_pretrained(f'{ORG_NAME}opus-mt-en-de' ) __lowercase= en_de_tokenizer(['I am a small frog'] , return_tensors=lowerCAmelCase ) self.assertIsInstance(lowerCAmelCase , lowerCAmelCase ) __lowercase= [3_8, 1_2_1, 1_4, 6_9_7, 3_8_8_4_8, 0] self.assertListEqual(lowerCAmelCase , batch.input_ids[0] ) __lowercase= tempfile.mkdtemp() en_de_tokenizer.save_pretrained(lowerCAmelCase ) __lowercase= [x.name for x in Path(lowerCAmelCase ).glob('*' )] self.assertIn('source.spm' , lowerCAmelCase ) MarianTokenizer.from_pretrained(lowerCAmelCase ) def _A (self ): __lowercase= self.get_tokenizer() __lowercase= tok( ['I am a small frog' * 1_0_0_0, 'I am a small frog'] , padding=lowerCAmelCase , truncation=lowerCAmelCase , return_tensors=lowerCAmelCase ) self.assertIsInstance(lowerCAmelCase , lowerCAmelCase ) self.assertEqual(batch.input_ids.shape , (2, 5_1_2) ) def _A (self ): __lowercase= self.get_tokenizer() __lowercase= tok(['I am a tiny frog', 'I am a small frog'] , padding=lowerCAmelCase , return_tensors=lowerCAmelCase ) 
self.assertIsInstance(lowerCAmelCase , lowerCAmelCase ) self.assertEqual(batch_smaller.input_ids.shape , (2, 1_0) ) @slow def _A (self ): # fmt: off __lowercase= {'input_ids': [[4_3_4_9_5, 4_6_2, 2_0, 4_2_1_6_4, 1_3_6_9, 5_2, 4_6_4, 1_3_2, 1_7_0_3, 4_9_2, 1_3, 7_4_9_1, 3_8_9_9_9, 6, 8, 4_6_4, 1_3_2, 1_7_0_3, 4_9_2, 1_3, 4_6_6_9, 3_7_8_6_7, 1_3, 7_5_2_5, 2_7, 1_5_9_3, 9_8_8, 1_3, 3_3_9_7_2, 7_0_2_9, 6, 2_0, 8_2_5_1, 3_8_3, 2, 2_7_0, 5_8_6_6, 3_7_8_8, 2, 2_3_5_3, 8_2_5_1, 1_2_3_3_8, 2, 1_3_9_5_8, 3_8_7, 2, 3_6_2_9, 6_9_5_3, 1_8_8, 2_9_0_0, 2, 1_3_9_5_8, 8_0_1_1, 1_1_5_0_1, 2_3, 8_4_6_0, 4_0_7_3, 3_4_0_0_9, 2_0, 4_3_5, 1_1_4_3_9, 2_7, 8, 8_4_6_0, 4_0_7_3, 6_0_0_4, 2_0, 9_9_8_8, 3_7_5, 2_7, 3_3, 2_6_6, 1_9_4_5, 1_0_7_6, 1_3_5_0, 3_7_8_6_7, 3_2_8_8, 5, 5_7_7, 1_0_7_6, 4_3_7_4, 8, 5_0_8_2, 5, 2_6_4_5_3, 2_5_7, 5_5_6, 4_0_3, 2, 2_4_2, 1_3_2, 3_8_3, 3_1_6, 4_9_2, 8, 1_0_7_6_7, 6, 3_1_6, 3_0_4, 4_2_3_9, 3, 0], [1_4_8, 1_5_7_2_2, 1_9, 1_8_3_9, 1_2, 1_3_5_0, 1_3, 2_2_3_2_7, 5_0_8_2, 5_4_1_8, 4_7_5_6_7, 3_5_9_3_8, 5_9, 3_1_8, 1_9_5_5_2, 1_0_8, 2_1_8_3, 5_4, 1_4_9_7_6, 4_8_3_5, 3_2, 5_4_7, 1_1_1_4, 8, 3_1_5, 2_4_1_7, 5, 9_2, 1_9_0_8_8, 3, 0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0], [3_6, 6_3_9_5, 1_2_5_7_0, 3_9_1_4_7, 1_1_5_9_7, 6, 2_6_6, 4, 4_5_4_0_5, 7_2_9_6, 3, 0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=lowerCAmelCase , model_name='Helsinki-NLP/opus-mt-en-de' , revision='1a8c2263da11e68e50938f97e10cd57820bd504c' , decode_kwargs={'use_source_tokenizer': True} , ) def _A (self ): __lowercase= MarianTokenizer.from_pretrained('hf-internal-testing/test-marian-two-vocabs' ) __lowercase= 'Tämä on testi' __lowercase= 'This is a test' __lowercase= [7_6, 7, 2_0_4_7, 2] __lowercase= [6_9, 1_2, 1_1, 9_4_0, 2] __lowercase= tokenizer(lowerCAmelCase ).input_ids self.assertListEqual(lowerCAmelCase , lowerCAmelCase ) __lowercase= tokenizer(text_target=lowerCAmelCase ).input_ids self.assertListEqual(lowerCAmelCase , lowerCAmelCase ) __lowercase= tokenizer.decode(lowerCAmelCase , skip_special_tokens=lowerCAmelCase ) self.assertEqual(lowerCAmelCase , lowerCAmelCase )
295
from __future__ import annotations


def prime_factors(n: int) -> list[int]:
    """Return the prime factorization of ``n`` as a non-decreasing list of primes."""
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors


if __name__ == "__main__":
    import doctest

    doctest.testmod()
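# Quick usage check for prime_factors above: the factors come out in
# non-decreasing order and multiply back to the input.
from math import prod

assert prime_factors(360) == [2, 2, 2, 3, 3, 5]
assert prod(prime_factors(360)) == 360
assert prime_factors(97) == [97]  # a prime is its own factorization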
295
1
import json import os import tempfile import transformers import datasets from utils import generate_example_dataset, get_duration lowerCAmelCase = 5_0_0_0_0_0 lowerCAmelCase ,lowerCAmelCase = os.path.split(__file__) lowerCAmelCase = os.path.join(RESULTS_BASEPATH, '''results''', RESULTS_FILENAME.replace('''.py''', '''.json''')) @get_duration def _lowerCamelCase( lowercase__ , **lowercase__ ) -> List[Any]: '''simple docstring''' __lowercase= dataset.map(**lowercase__ ) @get_duration def _lowerCamelCase( lowercase__ , **lowercase__ ) -> int: '''simple docstring''' __lowercase= dataset.filter(**lowercase__ ) def _lowerCamelCase( ) -> int: '''simple docstring''' __lowercase= {'num examples': SPEED_TEST_N_EXAMPLES} with tempfile.TemporaryDirectory() as tmp_dir: __lowercase= datasets.Features({'text': datasets.Value('string' ), 'numbers': datasets.Value('float32' )} ) __lowercase= generate_example_dataset( os.path.join(lowercase__ , 'dataset.arrow' ) , lowercase__ , num_examples=lowercase__ ) __lowercase= transformers.AutoTokenizer.from_pretrained('bert-base-cased' , use_fast=lowercase__ ) def tokenize(lowercase__ ): return tokenizer(examples['text'] ) __lowercase= map(lowercase__ ) __lowercase= map(lowercase__ , batched=lowercase__ ) __lowercase= map(lowercase__ , function=lambda lowercase__ : None , batched=lowercase__ ) with dataset.formatted_as(type='numpy' ): __lowercase= map(lowercase__ , function=lambda lowercase__ : None , batched=lowercase__ ) with dataset.formatted_as(type='pandas' ): __lowercase= map(lowercase__ , function=lambda lowercase__ : None , batched=lowercase__ ) with dataset.formatted_as(type='torch' , columns='numbers' ): __lowercase= map(lowercase__ , function=lambda lowercase__ : None , batched=lowercase__ ) with dataset.formatted_as(type='tensorflow' , columns='numbers' ): __lowercase= map(lowercase__ , function=lambda lowercase__ : None , batched=lowercase__ ) __lowercase= map(lowercase__ , function=lowercase__ , batched=lowercase__ ) __lowercase= filter(lowercase__ ) # Activate later when tokenizer support batched inputs # with dataset.formatted_as(type='numpy'): # times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True) with open(lowercase__ , 'wb' ) as f: f.write(json.dumps(lowercase__ ).encode('utf-8' ) ) if __name__ == "__main__": # useful to run the profiler benchmark_map_filter()
295
import os import re import warnings from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_ta import TaTokenizer else: lowerCAmelCase = None lowerCAmelCase = logging.get_logger(__name__) lowerCAmelCase = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''} lowerCAmelCase = { '''vocab_file''': { '''t5-small''': '''https://huggingface.co/t5-small/resolve/main/spiece.model''', '''t5-base''': '''https://huggingface.co/t5-base/resolve/main/spiece.model''', '''t5-large''': '''https://huggingface.co/t5-large/resolve/main/spiece.model''', '''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/spiece.model''', '''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/spiece.model''', }, '''tokenizer_file''': { '''t5-small''': '''https://huggingface.co/t5-small/resolve/main/tokenizer.json''', '''t5-base''': '''https://huggingface.co/t5-base/resolve/main/tokenizer.json''', '''t5-large''': '''https://huggingface.co/t5-large/resolve/main/tokenizer.json''', '''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/tokenizer.json''', '''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/tokenizer.json''', }, } # TODO(PVP) - this should be removed in Transformers v5 lowerCAmelCase = { '''t5-small''': 5_1_2, '''t5-base''': 5_1_2, '''t5-large''': 5_1_2, '''t5-3b''': 5_1_2, '''t5-11b''': 5_1_2, } class A ( A_ ): UpperCamelCase_ : Dict =VOCAB_FILES_NAMES UpperCamelCase_ : Dict =PRETRAINED_VOCAB_FILES_MAP UpperCamelCase_ : List[Any] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase_ : str =['''input_ids''', '''attention_mask'''] UpperCamelCase_ : List[str] =TaTokenizer UpperCamelCase_ : List[int] =[] def __init__(self , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase="</s>" , lowerCAmelCase="<unk>" , lowerCAmelCase="<pad>" , lowerCAmelCase=1_0_0 , lowerCAmelCase=None , **lowerCAmelCase , ): # Add extra_ids to the special token list if extra_ids > 0 and additional_special_tokens is None: __lowercase= [f'<extra_id_{i}>' for i in range(lowerCAmelCase )] elif extra_ids > 0 and additional_special_tokens is not None: # Check that we have the right number of extra special tokens __lowercase= len(set(filter(lambda lowerCAmelCase : bool('extra_id_' in str(lowerCAmelCase ) ) , lowerCAmelCase ) ) ) if extra_tokens != extra_ids: raise ValueError( f'Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are' ' provided to T5Tokenizer. 
In this case the additional_special_tokens must include the extra_ids' ' tokens' ) super().__init__( lowerCAmelCase , tokenizer_file=lowerCAmelCase , eos_token=lowerCAmelCase , unk_token=lowerCAmelCase , pad_token=lowerCAmelCase , extra_ids=lowerCAmelCase , additional_special_tokens=lowerCAmelCase , **lowerCAmelCase , ) __lowercase= vocab_file __lowercase= False if not self.vocab_file else True __lowercase= extra_ids @staticmethod def _A (lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes: __lowercase= TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path] if init_max_model_length is not None and init_max_model_length != max_model_length: return init_max_model_length elif init_max_model_length is None: warnings.warn( 'This tokenizer was incorrectly instantiated with a model max length of' f' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this' ' behavior is kept to avoid breaking backwards compatibility when padding/encoding with' ' `truncation is True`.\n- Be aware that you SHOULD NOT rely on' f' {pretrained_model_name_or_path} automatically truncating your input to' f' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences' f' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with' ' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please' ' instantiate this tokenizer with `model_max_length` set to your preferred value.' , lowerCAmelCase , ) return max_model_length def _A (self , lowerCAmelCase , lowerCAmelCase = None ): if not self.can_save_slow_tokenizer: raise ValueError( 'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ' 'tokenizer.' ) if not os.path.isdir(lowerCAmelCase ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return __lowercase= os.path.join( lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase ): copyfile(self.vocab_file , lowerCAmelCase ) logger.info(f'Copy vocab file to {out_vocab_file}' ) return (out_vocab_file,) def _A (self , lowerCAmelCase , lowerCAmelCase = None ): __lowercase= token_ids_a + [self.eos_token_id] if token_ids_a is None: return self.prefix_tokens + token_ids_a else: __lowercase= token_ids_a + [self.eos_token_id] return self.prefix_tokens + token_ids_a + token_ids_a def _A (self , lowerCAmelCase , lowerCAmelCase = None ): __lowercase= [self.eos_token_id] if token_ids_a is None: return len(token_ids_a + eos ) * [0] return len(token_ids_a + eos + token_ids_a + eos ) * [0] def _A (self ): return list( set(filter(lambda lowerCAmelCase : bool(re.search(r'<extra_id_\d+>' , lowerCAmelCase ) ) is not None , self.additional_special_tokens ) ) ) def _A (self ): return [self.convert_tokens_to_ids(lowerCAmelCase ) for token in self.get_sentinel_tokens()]
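# The extra_ids machinery above registers sentinel tokens <extra_id_0> ...
# <extra_id_{n-1}> used by T5's span-corruption objective; the anonymized class
# corresponds to transformers' T5TokenizerFast. A minimal usage sketch, assuming
# the sentencepiece and tokenizers packages are installed (this downloads the
# t5-small checkpoint):
from transformers import T5TokenizerFast

tokenizer = T5TokenizerFast.from_pretrained("t5-small")  # 100 extra_ids by default
print(tokenizer.get_sentinel_tokens()[:3])
print(tokenizer("The <extra_id_0> walks in <extra_id_1> park").input_ids)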
295
1
def equated_monthly_installments(
    principal: float, rate_per_annum: float, years_to_repay: int
) -> float:
    """Compute the fixed monthly payment (EMI) on an amortizing loan."""
    if principal <= 0:
        raise Exception("Principal borrowed must be > 0")
    if rate_per_annum < 0:
        raise Exception("Rate of interest must be >= 0")
    if years_to_repay <= 0 or not isinstance(years_to_repay, int):
        raise Exception("Years to repay must be an integer > 0")

    # Yearly rate is divided by 12 to get monthly rate
    rate_per_month = rate_per_annum / 12

    # Years to repay is multiplied by 12 to get number of payments as payment is monthly
    number_of_payments = years_to_repay * 12

    return (
        principal
        * rate_per_month
        * (1 + rate_per_month) ** number_of_payments
        / ((1 + rate_per_month) ** number_of_payments - 1)
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
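# Worked example for the amortization formula above,
# EMI = P * r * (1 + r)**n / ((1 + r)**n - 1):
# borrowing 25_000 at 8% per annum over 3 years (36 monthly payments)
# gives roughly 783.41 per month, i.e. about 3_200 of total interest.
emi = equated_monthly_installments(25_000, 0.08, 3)
print(f"monthly payment: {emi:.2f}")       # ~783.41
print(f"total repaid:    {emi * 36:.2f}")  # ~28_202.76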
295
from collections.abc import Sequence


def max_subarray_sum(arr: Sequence[float], allow_empty_subarrays: bool = False) -> float:
    """Kadane's algorithm: maximum sum over all contiguous subarrays of ``arr``."""
    if not arr:
        return 0
    max_sum = 0 if allow_empty_subarrays else float("-inf")
    curr_sum = 0.0
    for num in arr:
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)
    return max_sum


if __name__ == "__main__":
    from doctest import testmod

    testmod()

    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
    print(f"{max_subarray_sum(nums) = }")
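# Edge-case demo for max_subarray_sum above: on an all-negative input the
# flag decides whether the empty subarray (sum 0) is admissible.
all_negative = [-8, -3, -6, -2, -5, -4]
assert max_subarray_sum(all_negative) == -2  # best single element
assert max_subarray_sum(all_negative, allow_empty_subarrays=True) == 0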
295
1
def merge_sort(collection: list) -> list:
    """Pure-Python top-down merge sort; returns a new sorted list."""

    def merge(left: list, right: list) -> list:
        """Merge two sorted lists into a single sorted list."""

        def _merge():
            while left and right:
                yield (left if left[0] <= right[0] else right).pop(0)
            yield from left
            yield from right

        return list(_merge())

    if len(collection) <= 1:
        return collection
    mid = len(collection) // 2
    return merge(merge_sort(collection[:mid]), merge_sort(collection[mid:]))


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(*merge_sort(unsorted), sep=",")
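# Lightweight property test for merge_sort above: on random inputs the
# result must agree with Python's built-in sorted().
import random

for _ in range(100):
    data = [random.randint(-50, 50) for _ in range(random.randint(0, 30))]
    assert merge_sort(list(data)) == sorted(data)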
295
import gc import inspect import unittest import torch from parameterized import parameterized from diffusers import PriorTransformer from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin enable_full_determinism() class A ( A_ , unittest.TestCase ): UpperCamelCase_ : Any =PriorTransformer UpperCamelCase_ : List[str] ='''hidden_states''' @property def _A (self ): __lowercase= 4 __lowercase= 8 __lowercase= 7 __lowercase= floats_tensor((batch_size, embedding_dim) ).to(lowerCAmelCase ) __lowercase= floats_tensor((batch_size, embedding_dim) ).to(lowerCAmelCase ) __lowercase= floats_tensor((batch_size, num_embeddings, embedding_dim) ).to(lowerCAmelCase ) return { "hidden_states": hidden_states, "timestep": 2, "proj_embedding": proj_embedding, "encoder_hidden_states": encoder_hidden_states, } def _A (self , lowerCAmelCase=0 ): torch.manual_seed(lowerCAmelCase ) __lowercase= 4 __lowercase= 8 __lowercase= 7 __lowercase= torch.randn((batch_size, embedding_dim) ).to(lowerCAmelCase ) __lowercase= torch.randn((batch_size, embedding_dim) ).to(lowerCAmelCase ) __lowercase= torch.randn((batch_size, num_embeddings, embedding_dim) ).to(lowerCAmelCase ) return { "hidden_states": hidden_states, "timestep": 2, "proj_embedding": proj_embedding, "encoder_hidden_states": encoder_hidden_states, } @property def _A (self ): return (4, 8) @property def _A (self ): return (4, 8) def _A (self ): __lowercase= { 'num_attention_heads': 2, 'attention_head_dim': 4, 'num_layers': 2, 'embedding_dim': 8, 'num_embeddings': 7, 'additional_embeddings': 4, } __lowercase= self.dummy_input return init_dict, inputs_dict def _A (self ): __lowercase, __lowercase= PriorTransformer.from_pretrained( 'hf-internal-testing/prior-dummy' , output_loading_info=lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) self.assertEqual(len(loading_info['missing_keys'] ) , 0 ) model.to(lowerCAmelCase ) __lowercase= model(**self.dummy_input )[0] assert hidden_states is not None, "Make sure output is not None" def _A (self ): __lowercase, __lowercase= self.prepare_init_args_and_inputs_for_common() __lowercase= self.model_class(**lowerCAmelCase ) __lowercase= inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __lowercase= [*signature.parameters.keys()] __lowercase= ['hidden_states', 'timestep'] self.assertListEqual(arg_names[:2] , lowerCAmelCase ) def _A (self ): __lowercase= PriorTransformer.from_pretrained('hf-internal-testing/prior-dummy' ) __lowercase= model.to(lowerCAmelCase ) if hasattr(lowerCAmelCase , 'set_default_attn_processor' ): model.set_default_attn_processor() __lowercase= self.get_dummy_seed_input() with torch.no_grad(): __lowercase= model(**lowerCAmelCase )[0] __lowercase= output[0, :5].flatten().cpu() print(lowerCAmelCase ) # Since the VAE Gaussian prior's generator is seeded on the appropriate device, # the expected output slices are not the same for CPU and GPU. 
__lowercase= torch.tensor([-1.34_36, -0.28_70, 0.75_38, 0.43_68, -0.02_39] ) self.assertTrue(torch_all_close(lowerCAmelCase , lowerCAmelCase , rtol=1E-2 ) ) @slow class A ( unittest.TestCase ): def _A (self , lowerCAmelCase=1 , lowerCAmelCase=7_6_8 , lowerCAmelCase=7_7 , lowerCAmelCase=0 ): torch.manual_seed(lowerCAmelCase ) __lowercase= batch_size __lowercase= embedding_dim __lowercase= num_embeddings __lowercase= torch.randn((batch_size, embedding_dim) ).to(lowerCAmelCase ) __lowercase= torch.randn((batch_size, embedding_dim) ).to(lowerCAmelCase ) __lowercase= torch.randn((batch_size, num_embeddings, embedding_dim) ).to(lowerCAmelCase ) return { "hidden_states": hidden_states, "timestep": 2, "proj_embedding": proj_embedding, "encoder_hidden_states": encoder_hidden_states, } def _A (self ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @parameterized.expand( [ # fmt: off [1_3, [-0.58_61, 0.12_83, -0.09_31, 0.08_82, 0.44_76, 0.13_29, -0.04_98, 0.06_40]], [3_7, [-0.49_13, 0.01_10, -0.04_83, 0.05_41, 0.49_54, -0.01_70, 0.03_54, 0.16_51]], # fmt: on ] ) def _A (self , lowerCAmelCase , lowerCAmelCase ): __lowercase= PriorTransformer.from_pretrained('kandinsky-community/kandinsky-2-1-prior' , subfolder='prior' ) model.to(lowerCAmelCase ) __lowercase= self.get_dummy_seed_input(seed=lowerCAmelCase ) with torch.no_grad(): __lowercase= model(**lowerCAmelCase )[0] assert list(sample.shape ) == [1, 7_6_8] __lowercase= sample[0, :8].flatten().cpu() print(lowerCAmelCase ) __lowercase= torch.tensor(lowerCAmelCase ) assert torch_all_close(lowerCAmelCase , lowerCAmelCase , atol=1E-3 )
295
1
from collections import OrderedDict from typing import Any, List, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import logging lowerCAmelCase = logging.get_logger(__name__) lowerCAmelCase = { '''EleutherAI/gpt-j-6B''': '''https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json''', # See all GPT-J models at https://huggingface.co/models?filter=gpt_j } class A ( A_ ): UpperCamelCase_ : List[str] ='''gptj''' UpperCamelCase_ : List[str] ={ '''max_position_embeddings''': '''n_positions''', '''hidden_size''': '''n_embd''', '''num_attention_heads''': '''n_head''', '''num_hidden_layers''': '''n_layer''', } def __init__(self , lowerCAmelCase=5_0_4_0_0 , lowerCAmelCase=2_0_4_8 , lowerCAmelCase=4_0_9_6 , lowerCAmelCase=2_8 , lowerCAmelCase=1_6 , lowerCAmelCase=6_4 , lowerCAmelCase=None , lowerCAmelCase="gelu_new" , lowerCAmelCase=0.0 , lowerCAmelCase=0.0 , lowerCAmelCase=0.0 , lowerCAmelCase=1E-5 , lowerCAmelCase=0.02 , lowerCAmelCase=True , lowerCAmelCase=5_0_2_5_6 , lowerCAmelCase=5_0_2_5_6 , lowerCAmelCase=False , **lowerCAmelCase , ): __lowercase= vocab_size __lowercase= n_positions __lowercase= n_embd __lowercase= n_layer __lowercase= n_head __lowercase= n_inner __lowercase= rotary_dim __lowercase= activation_function __lowercase= resid_pdrop __lowercase= embd_pdrop __lowercase= attn_pdrop __lowercase= layer_norm_epsilon __lowercase= initializer_range __lowercase= use_cache __lowercase= bos_token_id __lowercase= eos_token_id super().__init__( bos_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , tie_word_embeddings=lowerCAmelCase , **lowerCAmelCase ) class A ( A_ ): def __init__(self , lowerCAmelCase , lowerCAmelCase = "default" , lowerCAmelCase = None , lowerCAmelCase = False , ): super().__init__(lowerCAmelCase , task=lowerCAmelCase , patching_specs=lowerCAmelCase , use_past=lowerCAmelCase ) if not getattr(self._config , 'pad_token_id' , lowerCAmelCase ): # TODO: how to do that better? __lowercase= 0 @property def _A (self ): __lowercase= OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}} ) if self.use_past: self.fill_with_past_key_values_(lowerCAmelCase , direction='inputs' ) __lowercase= {0: 'batch', 1: 'past_sequence + sequence'} else: __lowercase= {0: 'batch', 1: 'sequence'} return common_inputs @property def _A (self ): return self._config.n_layer @property def _A (self ): return self._config.n_head def _A (self , lowerCAmelCase , lowerCAmelCase = -1 , lowerCAmelCase = -1 , lowerCAmelCase = False , lowerCAmelCase = None , ): __lowercase= super(lowerCAmelCase , self ).generate_dummy_inputs( lowerCAmelCase , batch_size=lowerCAmelCase , seq_length=lowerCAmelCase , is_pair=lowerCAmelCase , framework=lowerCAmelCase ) # We need to order the input in the way they appears in the forward() __lowercase= OrderedDict({'input_ids': common_inputs['input_ids']} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' 
) else: import torch __lowercase, __lowercase= common_inputs['input_ids'].shape # Not using the same length for past_key_values __lowercase= seqlen + 2 __lowercase= ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) __lowercase= [ (torch.zeros(lowerCAmelCase ), torch.zeros(lowerCAmelCase )) for _ in range(self.num_layers ) ] __lowercase= common_inputs['attention_mask'] if self.use_past: __lowercase= ordered_inputs['attention_mask'].dtype __lowercase= torch.cat( [ordered_inputs['attention_mask'], torch.ones(lowerCAmelCase , lowerCAmelCase , dtype=lowerCAmelCase )] , dim=1 ) return ordered_inputs @property def _A (self ): return 1_3
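# Standalone illustration of the dummy past_key_values built above: one
# zero-filled (key, value) pair per decoder layer with shape
# (batch, n_head, past_sequence_length, head_dim). The sizes below are toy
# assumptions for the sketch, not GPT-J's real configuration.
import torch

batch, n_head, hidden, n_layer = 2, 4, 64, 3
seqlen = 7
past_len = seqlen + 2  # mirrors the "+ 2" offset used above
shape = (batch, n_head, past_len, hidden // n_head)
past_key_values = [(torch.zeros(shape), torch.zeros(shape)) for _ in range(n_layer)]
print(past_key_values[0][0].shape)  # torch.Size([2, 4, 9, 16])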
295
def counting_sort(collection):
    """Stable counting sort over a list of integers (negatives allowed)."""
    if collection == []:
        return []

    # get some information about the collection
    coll_len = len(collection)
    coll_max = max(collection)
    coll_min = min(collection)

    # create the counting array
    counting_arr_length = coll_max + 1 - coll_min
    counting_arr = [0] * counting_arr_length

    # count how much a number appears in the collection
    for number in collection:
        counting_arr[number - coll_min] += 1

    # sum each position with it's predecessors. now, counting_arr[i] tells
    # us how many elements <= i has in the collection
    for i in range(1, counting_arr_length):
        counting_arr[i] = counting_arr[i] + counting_arr[i - 1]

    # create the output collection
    ordered = [0] * coll_len

    # place the elements in the output, respecting the original order (stable
    # sort) from end to begin, updating counting_arr
    for i in reversed(range(0, coll_len)):
        ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i]
        counting_arr[collection[i] - coll_min] -= 1

    return ordered


def counting_sort_string(string):
    """Sort the characters of a string with counting sort."""
    return "".join([chr(i) for i in counting_sort([ord(c) for c in string])])


if __name__ == "__main__":
    # Test string sort
    assert counting_sort_string("thisisthestring") == "eghhiiinrsssttt"

    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(counting_sort(unsorted))
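# Usage notes for counting_sort above: it handles negative values via the
# coll_min offset and runs in O(n + k) time and O(k) extra space, where
# k = max - min + 1, so it is attractive only when the value range is small.
assert counting_sort([5, 3, -1, 0, 3]) == [-1, 0, 3, 3, 5]
assert counting_sort_string("bca") == "abc"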
295
1
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)

_import_structure = {
    "configuration_vision_text_dual_encoder": ["VisionTextDualEncoderConfig"],
    "processing_vision_text_dual_encoder": ["VisionTextDualEncoderProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vision_text_dual_encoder"] = ["VisionTextDualEncoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_vision_text_dual_encoder"] = ["FlaxVisionTextDualEncoderModel"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vision_text_dual_encoder"] = ["TFVisionTextDualEncoderModel"]


if TYPE_CHECKING:
    from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
    from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
295
import os import tempfile import unittest from transformers import DistilBertConfig, is_torch_available from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, DistilBertModel, ) class A ( A_ ): def __init__(self , lowerCAmelCase , lowerCAmelCase=1_3 , lowerCAmelCase=7 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=False , lowerCAmelCase=True , lowerCAmelCase=9_9 , lowerCAmelCase=3_2 , lowerCAmelCase=5 , lowerCAmelCase=4 , lowerCAmelCase=3_7 , lowerCAmelCase="gelu" , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=5_1_2 , lowerCAmelCase=1_6 , lowerCAmelCase=2 , lowerCAmelCase=0.02 , lowerCAmelCase=3 , lowerCAmelCase=4 , lowerCAmelCase=None , ): __lowercase= parent __lowercase= batch_size __lowercase= seq_length __lowercase= is_training __lowercase= use_input_mask __lowercase= use_token_type_ids __lowercase= use_labels __lowercase= vocab_size __lowercase= hidden_size __lowercase= num_hidden_layers __lowercase= num_attention_heads __lowercase= intermediate_size __lowercase= hidden_act __lowercase= hidden_dropout_prob __lowercase= attention_probs_dropout_prob __lowercase= max_position_embeddings __lowercase= type_vocab_size __lowercase= type_sequence_label_size __lowercase= initializer_range __lowercase= num_labels __lowercase= num_choices __lowercase= scope def _A (self ): __lowercase= ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowercase= None if self.use_input_mask: __lowercase= random_attention_mask([self.batch_size, self.seq_length] ) __lowercase= None __lowercase= None __lowercase= None if self.use_labels: __lowercase= ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowercase= ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __lowercase= ids_tensor([self.batch_size] , self.num_choices ) __lowercase= self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def _A (self ): return DistilBertConfig( vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): __lowercase= DistilBertModel(config=lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= model(lowerCAmelCase , lowerCAmelCase ) __lowercase= model(lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): __lowercase= DistilBertForMaskedLM(config=lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= model(lowerCAmelCase , attention_mask=lowerCAmelCase , labels=lowerCAmelCase ) 
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): __lowercase= DistilBertForQuestionAnswering(config=lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= model( lowerCAmelCase , attention_mask=lowerCAmelCase , start_positions=lowerCAmelCase , end_positions=lowerCAmelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): __lowercase= self.num_labels __lowercase= DistilBertForSequenceClassification(lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= model(lowerCAmelCase , attention_mask=lowerCAmelCase , labels=lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): __lowercase= self.num_labels __lowercase= DistilBertForTokenClassification(config=lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= model(lowerCAmelCase , attention_mask=lowerCAmelCase , labels=lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): __lowercase= self.num_choices __lowercase= DistilBertForMultipleChoice(config=lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __lowercase= input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __lowercase= model( lowerCAmelCase , attention_mask=lowerCAmelCase , labels=lowerCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def _A (self ): __lowercase= self.prepare_config_and_inputs() ((__lowercase), (__lowercase), (__lowercase), (__lowercase), (__lowercase), (__lowercase))= config_and_inputs __lowercase= {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class A ( A_ , A_ , unittest.TestCase ): UpperCamelCase_ : Any =( ( DistilBertModel, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, ) if is_torch_available() else None ) UpperCamelCase_ : Optional[int] =( { '''feature-extraction''': DistilBertModel, '''fill-mask''': DistilBertForMaskedLM, '''question-answering''': DistilBertForQuestionAnswering, '''text-classification''': DistilBertForSequenceClassification, '''token-classification''': DistilBertForTokenClassification, '''zero-shot''': DistilBertForSequenceClassification, } if is_torch_available() else {} ) UpperCamelCase_ : str =True UpperCamelCase_ : str =True UpperCamelCase_ : Union[str, Any] =True UpperCamelCase_ : Optional[int] =True def _A (self ): __lowercase= DistilBertModelTester(self ) __lowercase= ConfigTester(self , config_class=lowerCAmelCase , dim=3_7 ) def _A (self ): self.config_tester.run_common_tests() def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_model(*lowerCAmelCase ) def _A (self ): 
__lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_masked_lm(*lowerCAmelCase ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_question_answering(*lowerCAmelCase ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_sequence_classification(*lowerCAmelCase ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_token_classification(*lowerCAmelCase ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_multiple_choice(*lowerCAmelCase ) @slow def _A (self ): for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowercase= DistilBertModel.from_pretrained(lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) @slow @require_torch_gpu def _A (self ): __lowercase, __lowercase= self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # BertForMultipleChoice behaves incorrectly in JIT environments. if model_class == DistilBertForMultipleChoice: return __lowercase= True __lowercase= model_class(config=lowerCAmelCase ) __lowercase= self._prepare_for_class(lowerCAmelCase , lowerCAmelCase ) __lowercase= torch.jit.trace( lowerCAmelCase , (inputs_dict['input_ids'].to('cpu' ), inputs_dict['attention_mask'].to('cpu' )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(lowerCAmelCase , os.path.join(lowerCAmelCase , 'traced_model.pt' ) ) __lowercase= torch.jit.load(os.path.join(lowerCAmelCase , 'traced_model.pt' ) , map_location=lowerCAmelCase ) loaded(inputs_dict['input_ids'].to(lowerCAmelCase ) , inputs_dict['attention_mask'].to(lowerCAmelCase ) ) @require_torch class A ( unittest.TestCase ): @slow def _A (self ): __lowercase= DistilBertModel.from_pretrained('distilbert-base-uncased' ) __lowercase= torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] ) __lowercase= torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): __lowercase= model(lowerCAmelCase , attention_mask=lowerCAmelCase )[0] __lowercase= torch.Size((1, 1_1, 7_6_8) ) self.assertEqual(output.shape , lowerCAmelCase ) __lowercase= torch.tensor( [[[-0.16_39, 0.32_99, 0.16_48], [-0.17_46, 0.32_89, 0.17_10], [-0.18_84, 0.33_57, 0.18_10]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , lowerCAmelCase , atol=1E-4 ) )
295
1
import os from tempfile import TemporaryDirectory from unittest import TestCase import pytest from absl.testing import parameterized from datasets import config from datasets.arrow_reader import HF_GCP_BASE_URL from datasets.builder import DatasetBuilder from datasets.dataset_dict import IterableDatasetDict from datasets.iterable_dataset import IterableDataset from datasets.load import dataset_module_factory, import_main_class from datasets.utils.file_utils import cached_path lowerCAmelCase = [ {'''dataset''': '''wikipedia''', '''config_name''': '''20220301.de'''}, {'''dataset''': '''wikipedia''', '''config_name''': '''20220301.en'''}, {'''dataset''': '''wikipedia''', '''config_name''': '''20220301.fr'''}, {'''dataset''': '''wikipedia''', '''config_name''': '''20220301.frr'''}, {'''dataset''': '''wikipedia''', '''config_name''': '''20220301.it'''}, {'''dataset''': '''wikipedia''', '''config_name''': '''20220301.simple'''}, {'''dataset''': '''snli''', '''config_name''': '''plain_text'''}, {'''dataset''': '''eli5''', '''config_name''': '''LFQA_reddit'''}, {'''dataset''': '''wiki40b''', '''config_name''': '''en'''}, {'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.nq.compressed'''}, {'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.nq.no_index'''}, {'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.multiset.no_index'''}, {'''dataset''': '''natural_questions''', '''config_name''': '''default'''}, ] def _lowerCamelCase( lowercase__=True ) -> Any: '''simple docstring''' if with_config: return [ { "testcase_name": d["dataset"] + "/" + d["config_name"], "dataset": d["dataset"], "config_name": d["config_name"], } for d in DATASETS_ON_HF_GCP ] else: return [ {"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP} ] @parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=A_ ) ) class A ( A_ ): UpperCamelCase_ : Optional[int] =None UpperCamelCase_ : str =None def _A (self , lowerCAmelCase , lowerCAmelCase ): with TemporaryDirectory() as tmp_dir: __lowercase= dataset_module_factory(lowerCAmelCase , cache_dir=lowerCAmelCase ) __lowercase= import_main_class(dataset_module.module_path , dataset=lowerCAmelCase ) __lowercase= builder_cls( cache_dir=lowerCAmelCase , config_name=lowerCAmelCase , hash=dataset_module.hash , ) __lowercase= '/'.join( [ HF_GCP_BASE_URL, builder_instance._relative_data_dir(with_hash=lowerCAmelCase ).replace(os.sep , '/' ), config.DATASET_INFO_FILENAME, ] ) __lowercase= cached_path(lowerCAmelCase , cache_dir=lowerCAmelCase ) self.assertTrue(os.path.exists(lowerCAmelCase ) ) @pytest.mark.integration def _lowerCamelCase( lowercase__ ) -> Optional[Any]: '''simple docstring''' __lowercase= tmp_path_factory.mktemp('test_hf_gcp' ) / 'test_wikipedia_simple' __lowercase= dataset_module_factory('wikipedia' , cache_dir=lowercase__ ) __lowercase= import_main_class(dataset_module.module_path ) __lowercase= builder_cls( cache_dir=lowercase__ , config_name='20220301.frr' , hash=dataset_module.hash , ) # use the HF cloud storage, not the original download_and_prepare that uses apache-beam __lowercase= None builder_instance.download_and_prepare() __lowercase= builder_instance.as_dataset() assert ds @pytest.mark.integration def _lowerCamelCase( lowercase__ ) -> Any: '''simple docstring''' __lowercase= dataset_module_factory('wikipedia' , cache_dir=lowercase__ ) __lowercase= import_main_class(dataset_module.module_path , dataset=lowercase__ ) __lowercase= builder_cls( 
cache_dir=lowercase__ , config_name='20220301.frr' , hash=dataset_module.hash , ) __lowercase= builder_instance.as_streaming_dataset() assert ds assert isinstance(lowercase__ , lowercase__ ) assert "train" in ds assert isinstance(ds['train'] , lowercase__ ) assert next(iter(ds['train'] ) )
295
def bfs(graph, source, sink, parent):
    """BFS for an augmenting path from source to sink; records it in ``parent``."""
    visited = [False] * len(graph)
    queue = []
    queue.append(source)
    visited[source] = True

    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u

    return visited[sink]


def ford_fulkerson(graph, source, sink):
    """Maximum s-t flow via repeated BFS augmentation (Edmonds-Karp)."""
    parent = [-1] * (len(graph))
    max_flow = 0

    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum residual capacity along the augmenting path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]

        max_flow += path_flow

        # update residual capacities along the path
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]

    return max_flow


graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]

source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
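# The capacity matrix above is the classic CLRS max-flow example, whose
# maximum s-t flow is 23. Note that ford_fulkerson mutates its argument
# (it turns the capacity matrix into a residual graph), so build a fresh
# matrix if the original capacities are still needed.
clrs_graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]
assert ford_fulkerson(clrs_graph, 0, 5) == 23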
295
1
from __future__ import annotations


def kmp(pattern: str, text: str) -> bool:
    """Knuth-Morris-Pratt search: True if ``pattern`` occurs in ``text``."""
    # 1) Construct the failure array
    failure = get_failure_array(pattern)

    # 2) Step through text searching for pattern
    i, j = 0, 0  # index into text, pattern
    while i < len(text):
        if pattern[j] == text[i]:
            if j == (len(pattern) - 1):
                return True
            j += 1
        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False


def get_failure_array(pattern: str) -> list[int]:
    """Length of the longest proper prefix that is also a suffix, per prefix."""
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i)
    return failure


if __name__ == "__main__":
    # Test 1)
    pattern = "abc1abc12"
    texta = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    textb = "alskfjaldsk23adsfabcabc"
    assert kmp(pattern, texta) and not kmp(pattern, textb)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert kmp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert kmp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert kmp(pattern, text)

    # Test 5)
    pattern = "aabaabaaa"
    assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
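# Why the failure array helps: after matching "ABAB" and then failing on "X",
# failure[3] == 2 says the last two matched characters ("AB") are also a
# pattern prefix, so the scan resumes at pattern index 2 instead of restarting
# from 0 -- this is what keeps KMP linear in len(text).
assert get_failure_array("ABABX") == [0, 0, 1, 2, 0]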
295
from __future__ import annotations


def kmp(pattern: str, text: str) -> bool:
    """Knuth-Morris-Pratt search: True if ``pattern`` occurs in ``text``."""
    # 1) Construct the failure array
    failure = get_failure_array(pattern)

    # 2) Step through text searching for pattern
    i, j = 0, 0  # index into text, pattern
    while i < len(text):
        if pattern[j] == text[i]:
            if j == (len(pattern) - 1):
                return True
            j += 1
        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False


def get_failure_array(pattern: str) -> list[int]:
    """Length of the longest proper prefix that is also a suffix, per prefix."""
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i)
    return failure


if __name__ == "__main__":
    # Test 1)
    pattern = "abc1abc12"
    texta = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    textb = "alskfjaldsk23adsfabcabc"
    assert kmp(pattern, texta) and not kmp(pattern, textb)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert kmp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert kmp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert kmp(pattern, text)

    # Test 5)
    pattern = "aabaabaaa"
    assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
295
1
import argparse from pathlib import Path import torch from transformers import OPTConfig, OPTModel from transformers.utils import logging logging.set_verbosity_info() lowerCAmelCase = logging.get_logger(__name__) def _lowerCamelCase( lowercase__ ) -> Dict: '''simple docstring''' __lowercase= torch.load(lowercase__ , map_location='cpu' ) if "model" in sd.keys(): __lowercase= torch.load(lowercase__ , map_location='cpu' )['model'] # pop unnecessary weights __lowercase= [ 'decoder.version', 'decoder.output_projection.weight', ] for key in keys_to_delete: if key in sd: sd.pop(lowercase__ ) __lowercase= { 'decoder.project_in_dim.weight': 'decoder.project_in.weight', 'decoder.project_out_dim.weight': 'decoder.project_out.weight', 'decoder.layer_norm.weight': 'decoder.final_layer_norm.weight', 'decoder.layer_norm.bias': 'decoder.final_layer_norm.bias', } for old_key, new_key in keys_to_rename.items(): if old_key in sd: __lowercase= sd.pop(lowercase__ ) __lowercase= list(sd.keys() ) for key in keys: if ".qkv_proj." in key: __lowercase= sd[key] # We split QKV in separate Q,K,V __lowercase= key.replace('.qkv_proj.' , '.q_proj.' ) __lowercase= key.replace('.qkv_proj.' , '.k_proj.' ) __lowercase= key.replace('.qkv_proj.' , '.v_proj.' ) __lowercase= value.shape[0] assert depth % 3 == 0 # `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming: # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97 __lowercase, __lowercase, __lowercase= torch.split(lowercase__ , depth // 3 , dim=0 ) __lowercase= q __lowercase= k __lowercase= v del sd[key] return sd @torch.no_grad() def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__=None ) -> Union[str, Any]: '''simple docstring''' __lowercase= load_checkpoint(lowercase__ ) if config is not None: __lowercase= OPTConfig.from_pretrained(lowercase__ ) else: __lowercase= OPTConfig() __lowercase= OPTModel(lowercase__ ).half().eval() model.load_state_dict(lowercase__ ) # Check results Path(lowercase__ ).mkdir(exist_ok=lowercase__ ) model.save_pretrained(lowercase__ ) if __name__ == "__main__": lowerCAmelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--fairseq_path''', type=str, help=( '''path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:''' ''' https://huggingface.co/models?other=opt_metasq''' ), ) parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--hf_config''', default=None, type=str, help='''Define HF config.''') lowerCAmelCase = parser.parse_args() convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
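# Minimal illustration of the fused-QKV split performed above: a weight of
# shape (3 * hidden, hidden) is cut into three equal (hidden, hidden) blocks
# along dim 0 with torch.split. The toy tensor and sizes here are assumptions
# for the sketch, not real OPT checkpoint shapes.
import torch

hidden = 8
fused = torch.randn(3 * hidden, hidden)
depth = fused.shape[0]
assert depth % 3 == 0
q, k, v = torch.split(fused, depth // 3, dim=0)
print(q.shape, k.shape, v.shape)  # three torch.Size([8, 8]) blocks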
295
from typing import TYPE_CHECKING

from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable

_import_structure = {"configuration_gpt_neox": ["GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXConfig"]}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_gpt_neox_fast"] = ["GPTNeoXTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_gpt_neox"] = [
        "GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoXForCausalLM",
        "GPTNeoXForQuestionAnswering",
        "GPTNeoXForSequenceClassification",
        "GPTNeoXForTokenClassification",
        "GPTNeoXLayer",
        "GPTNeoXModel",
        "GPTNeoXPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_neox import (
            GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTNeoXForCausalLM,
            GPTNeoXForQuestionAnswering,
            GPTNeoXForSequenceClassification,
            GPTNeoXForTokenClassification,
            GPTNeoXLayer,
            GPTNeoXModel,
            GPTNeoXPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
295
1
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available

_import_structure = {
    "configuration_tapas": ["TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP", "TapasConfig"],
    "tokenization_tapas": ["TapasTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tapas"] = [
        "TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TapasForMaskedLM",
        "TapasForQuestionAnswering",
        "TapasForSequenceClassification",
        "TapasModel",
        "TapasPreTrainedModel",
        "load_tf_weights_in_tapas",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_tapas"] = [
        "TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFTapasForMaskedLM",
        "TFTapasForQuestionAnswering",
        "TFTapasForSequenceClassification",
        "TFTapasModel",
        "TFTapasPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
    from .tokenization_tapas import TapasTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tapas import (
            TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
            TapasForMaskedLM,
            TapasForQuestionAnswering,
            TapasForSequenceClassification,
            TapasModel,
            TapasPreTrainedModel,
            load_tf_weights_in_tapas,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_tapas import (
            TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFTapasForMaskedLM,
            TFTapasForQuestionAnswering,
            TFTapasForSequenceClassification,
            TFTapasModel,
            TFTapasPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import enum
import warnings

from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_tf_available():
    import tensorflow as tf


class ReturnType(enum.Enum):
    TENSORS = 0
    NEW_TEXT = 1
    FULL_TEXT = 2


@add_end_docstrings(PIPELINE_INIT_ARGS)
class TextGenerationPipeline(Pipeline):
    # Prefix text used to give more state to XLNet and Transformer-XL on short prompts
    XL_PREFIX = """
    In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
    voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
    Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
    and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
    accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
    the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
    begging for his blessing. <eod> </s> <eos>
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(
            TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_CAUSAL_LM_MAPPING
        )
        if "prefix" not in self._preprocess_params:
            # This is very specific. The logic is quite complex and needs to be done
            # as a "default".
            # It also defines both some preprocess_kwargs and generate_kwargs
            # which is why we cannot put them in their respective methods.
            prefix = None
            if self.model.config.prefix is not None:
                prefix = self.model.config.prefix
            if prefix is None and self.model.__class__.__name__ in [
                "XLNetLMHeadModel",
                "TransfoXLLMHeadModel",
                "TFXLNetLMHeadModel",
                "TFTransfoXLLMHeadModel",
            ]:
                # For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
                prefix = self.XL_PREFIX
            if prefix is not None:
                # Recalculate some generate_kwargs linked to prefix.
                preprocess_params, forward_params, _ = self._sanitize_parameters(prefix=prefix, **self._forward_params)
                self._preprocess_params = {**self._preprocess_params, **preprocess_params}
                self._forward_params = {**self._forward_params, **forward_params}

    def _sanitize_parameters(
        self,
        return_full_text=None,
        return_tensors=None,
        return_text=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        prefix=None,
        handle_long_generation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        preprocess_params = {}
        if prefix is not None:
            preprocess_params["prefix"] = prefix
        if prefix:
            prefix_inputs = self.tokenizer(
                prefix, padding=False, add_special_tokens=False, return_tensors=self.framework
            )
            generate_kwargs["prefix_length"] = prefix_inputs["input_ids"].shape[-1]

        if handle_long_generation is not None:
            if handle_long_generation not in {"hole"}:
                raise ValueError(
                    f"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"
                    " [None, 'hole']"
                )
            preprocess_params["handle_long_generation"] = handle_long_generation

        preprocess_params.update(generate_kwargs)

        forward_params = generate_kwargs

        postprocess_params = {}
        if return_full_text is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_full_text`")
            if return_tensors is not None:
                raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
        if return_tensors is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.TENSORS
        if return_type is not None:
            postprocess_params["return_type"] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim."
                )
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]

        return preprocess_params, forward_params, postprocess_params

    def _parse_and_tokenize(self, *args, **kwargs):
        # Parse arguments
        if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
            kwargs.update({"add_space_before_punct_symbol": True})

        return super()._parse_and_tokenize(*args, **kwargs)

    def __call__(self, text_inputs, **kwargs):
        return super().__call__(text_inputs, **kwargs)

    def preprocess(self, prompt_text, prefix="", handle_long_generation=None, **generate_kwargs):
        inputs = self.tokenizer(
            prefix + prompt_text, padding=False, add_special_tokens=False, return_tensors=self.framework
        )
        inputs["prompt_text"] = prompt_text

        if handle_long_generation == "hole":
            cur_len = inputs["input_ids"].shape[-1]
            if "max_new_tokens" in generate_kwargs:
                new_tokens = generate_kwargs["max_new_tokens"]
            else:
                new_tokens = generate_kwargs.get("max_length", self.model.config.max_length) - cur_len
                if new_tokens < 0:
                    raise ValueError("We cannot infer how many new tokens are expected")
            if cur_len + new_tokens > self.tokenizer.model_max_length:
                keep_length = self.tokenizer.model_max_length - new_tokens
                if keep_length <= 0:
                    raise ValueError(
                        "We cannot use `hole` to handle this generation the number of desired tokens exceeds the"
                        " models max length"
                    )

                inputs["input_ids"] = inputs["input_ids"][:, -keep_length:]
                if "attention_mask" in inputs:
                    inputs["attention_mask"] = inputs["attention_mask"][:, -keep_length:]

        return inputs

    def _forward(self, model_inputs, **generate_kwargs):
        input_ids = model_inputs["input_ids"]
        attention_mask = model_inputs.get("attention_mask", None)
        # Allow empty prompts
        if input_ids.shape[1] == 0:
            input_ids = None
            attention_mask = None
            in_b = 1
        else:
            in_b = input_ids.shape[0]
        prompt_text = model_inputs.pop("prompt_text")

        # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
        # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
        prefix_length = generate_kwargs.pop("prefix_length", 0)
        if prefix_length > 0:
            has_max_new_tokens = "max_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].max_new_tokens is not None
            )
            if not has_max_new_tokens:
                generate_kwargs["max_length"] = generate_kwargs.get("max_length") or self.model.config.max_length
                generate_kwargs["max_length"] += prefix_length
            has_min_new_tokens = "min_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].min_new_tokens is not None
            )
            if not has_min_new_tokens and "min_length" in generate_kwargs:
                generate_kwargs["min_length"] += prefix_length

        # BS x SL
        generated_sequence = self.model.generate(input_ids=input_ids, attention_mask=attention_mask, **generate_kwargs)
        out_b = generated_sequence.shape[0]
        if self.framework == "pt":
            generated_sequence = generated_sequence.reshape(in_b, out_b // in_b, *generated_sequence.shape[1:])
        elif self.framework == "tf":
            generated_sequence = tf.reshape(generated_sequence, (in_b, out_b // in_b, *generated_sequence.shape[1:]))
        return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}

    def postprocess(self, model_outputs, return_type=ReturnType.FULL_TEXT, clean_up_tokenization_spaces=True):
        generated_sequence = model_outputs["generated_sequence"][0]
        input_ids = model_outputs["input_ids"]
        prompt_text = model_outputs["prompt_text"]
        generated_sequence = generated_sequence.numpy().tolist()
        records = []
        for sequence in generated_sequence:
            if return_type == ReturnType.TENSORS:
                record = {"generated_token_ids": sequence}
            elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
                # Decode text
                text = self.tokenizer.decode(
                    sequence,
                    skip_special_tokens=True,
                    clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                )

                # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
                if input_ids is None:
                    prompt_length = 0
                else:
                    prompt_length = len(
                        self.tokenizer.decode(
                            input_ids[0],
                            skip_special_tokens=True,
                            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                        )
                    )

                if return_type == ReturnType.FULL_TEXT:
                    all_text = prompt_text + text[prompt_length:]
                else:
                    all_text = text[prompt_length:]

                record = {"generated_text": all_text}
            records.append(record)

        return records
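# --- Usage sketch (illustrative, not from the original module): how the
# parameters above surface in the public API. "gpt2" is just an example
# checkpoint; any causal LM checkpoint works.
from transformers import pipeline

generator = pipeline("text-generation", model="gpt2")

# return_full_text=False maps to ReturnType.NEW_TEXT in _sanitize_parameters,
# so postprocess() strips the prompt and returns only the continuation.
outputs = generator("Rasputin quickly becomes", max_new_tokens=20, return_full_text=False)
print(outputs[0]["generated_text"])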
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path

import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized

from transformers import AutoProcessor
from transformers.models.wav2vec2 import Wav2Vec2CTCTokenizer, Wav2Vec2FeatureExtractor
from transformers.models.wav2vec2.tokenization_wav2vec2 import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available

from ..wav2vec2.test_feature_extraction_wav2vec2 import floats_list


if is_pyctcdecode_available():
    from huggingface_hub import snapshot_download
    from pyctcdecode import BeamSearchDecoderCTC

    from transformers.models.wav2vec2_with_lm import Wav2Vec2ProcessorWithLM
    from transformers.models.wav2vec2_with_lm.processing_wav2vec2_with_lm import Wav2Vec2DecoderWithLMOutput

if is_torch_available():
    from transformers import Wav2Vec2ForCTC


@require_pyctcdecode
class Wav2Vec2ProcessorWithLMTest(unittest.TestCase):
    def setUp(self):
        vocab = "| <pad> <unk> <s> </s> a b c d e f g h i j k".split()
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.add_kwargs_tokens_map = {
            "unk_token": "<unk>",
            "bos_token": "<s>",
            "eos_token": "</s>",
        }
        feature_extractor_map = {
            "feature_size": 1,
            "padding_value": 0.0,
            "sampling_rate": 16000,
            "return_attention_mask": False,
            "do_normalize": True,
        }

        self.tmpdirname = tempfile.mkdtemp()
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.feature_extraction_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

        with open(self.feature_extraction_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(feature_extractor_map) + "\n")

        # load decoder from hub
        self.decoder_name = "hf-internal-testing/ngram-beam-search-decoder"

    def get_tokenizer(self, **kwargs_init):
        kwargs = self.add_kwargs_tokens_map.copy()
        kwargs.update(kwargs_init)
        return Wav2Vec2CTCTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return Wav2Vec2FeatureExtractor.from_pretrained(self.tmpdirname, **kwargs)

    def get_decoder(self, **kwargs):
        return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)
        processor.save_pretrained(self.tmpdirname)
        processor = Wav2Vec2ProcessorWithLM.from_pretrained(self.tmpdirname)

        # tokenizer
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, Wav2Vec2CTCTokenizer)

        # feature extractor
        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, Wav2Vec2FeatureExtractor)

        # decoder
        self.assertEqual(processor.decoder._alphabet.labels, decoder._alphabet.labels)
        self.assertEqual(
            processor.decoder.model_container[decoder._model_key]._unigram_set,
            decoder.model_container[decoder._model_key]._unigram_set,
        )
        self.assertIsInstance(processor.decoder, BeamSearchDecoderCTC)

    def test_save_load_pretrained_additional_features(self):
        processor = Wav2Vec2ProcessorWithLM(
            tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor(), decoder=self.get_decoder()
        )
        processor.save_pretrained(self.tmpdirname)

        # make sure that error is thrown when decoder alphabet doesn't match
        processor = Wav2Vec2ProcessorWithLM.from_pretrained(
            self.tmpdirname, alpha=5.0, beta=3.0, score_boundary=-7.0, unk_score_offset=3
        )

        # decoder
        self.assertEqual(processor.language_model.alpha, 5.0)
        self.assertEqual(processor.language_model.beta, 3.0)
        self.assertEqual(processor.language_model.score_boundary, -7.0)
        self.assertEqual(processor.language_model.unk_score_offset, 3)

    def test_load_decoder_tokenizer_mismatch_content(self):
        tokenizer = self.get_tokenizer()
        # add token to trigger raise
        tokenizer.add_tokens(["xx"])
        with self.assertRaisesRegex(ValueError, "include"):
            Wav2Vec2ProcessorWithLM(
                tokenizer=tokenizer, feature_extractor=self.get_feature_extractor(), decoder=self.get_decoder()
            )

    def test_feature_extractor(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        raw_speech = floats_list((3, 1000))

        input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
        input_processor = processor(raw_speech, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        input_str = "This is a test string"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def _get_dummy_logits(self, shape=(2, 10, 16), seed=77):
        np.random.seed(seed)
        return np.random.rand(*shape)

    def test_decoder(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits(shape=(10, 16), seed=13)

        decoded_processor = processor.decode(logits)
        decoded_decoder = decoder.decode_beams(logits)[0]

        self.assertEqual(decoded_decoder[0], decoded_processor.text)
        self.assertEqual("</s> <s> </s>", decoded_processor.text)
        self.assertEqual(decoded_decoder[-2], decoded_processor.logit_score)
        self.assertEqual(decoded_decoder[-1], decoded_processor.lm_score)

    @parameterized.expand([[None], ["fork"], ["spawn"]])
    def test_decoder_batch(self, pool_context):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits()

        # note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
        # otherwise, the LM won't be available to the pool's sub-processes.
        # manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
        if pool_context is None:
            decoded_processor = processor.batch_decode(logits)
        else:
            with get_context(pool_context).Pool() as pool:
                decoded_processor = processor.batch_decode(logits, pool)

        logits_list = list(logits)

        with get_context("fork").Pool() as p:
            decoded_beams = decoder.decode_beams_batch(p, logits_list)

        texts_decoder, logit_scores_decoder, lm_scores_decoder = [], [], []
        for beams in decoded_beams:
            texts_decoder.append(beams[0][0])
            logit_scores_decoder.append(beams[0][-2])
            lm_scores_decoder.append(beams[0][-1])

        self.assertListEqual(texts_decoder, decoded_processor.text)
        self.assertListEqual(["<s> <s> </s>", "<s> <s> <s>"], decoded_processor.text)
        self.assertListEqual(logit_scores_decoder, decoded_processor.logit_score)
        self.assertListEqual(lm_scores_decoder, decoded_processor.lm_score)

    def test_decoder_with_params(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits()

        beam_width = 15
        beam_prune_logp = -20.0
        token_min_logp = -4.0

        decoded_processor_out = processor.batch_decode(
            logits,
            beam_width=beam_width,
            beam_prune_logp=beam_prune_logp,
            token_min_logp=token_min_logp,
        )
        decoded_processor = decoded_processor_out.text

        logits_list = list(logits)

        with get_context("fork").Pool() as pool:
            decoded_decoder_out = decoder.decode_beams_batch(
                pool,
                logits_list,
                beam_width=beam_width,
                beam_prune_logp=beam_prune_logp,
                token_min_logp=token_min_logp,
            )

        decoded_decoder = [d[0][0] for d in decoded_decoder_out]
        logit_scores = [d[0][2] for d in decoded_decoder_out]
        lm_scores = [d[0][3] for d in decoded_decoder_out]

        self.assertListEqual(decoded_decoder, decoded_processor)
        self.assertListEqual(["</s> <s> <s>", "<s> <s> <s>"], decoded_processor)

        self.assertTrue(np.array_equal(logit_scores, decoded_processor_out.logit_score))
        self.assertTrue(np.allclose([-20.054, -18.447], logit_scores, atol=1e-3))

        self.assertTrue(np.array_equal(lm_scores, decoded_processor_out.lm_score))
        self.assertTrue(np.allclose([-15.554, -13.9474], lm_scores, atol=1e-3))

    def test_decoder_with_params_of_lm(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits()

        alpha = 2.0
        beta = 5.0
        unk_score_offset = -20.0
        lm_score_boundary = True

        decoded_processor_out = processor.batch_decode(
            logits,
            alpha=alpha,
            beta=beta,
            unk_score_offset=unk_score_offset,
            lm_score_boundary=lm_score_boundary,
        )
        decoded_processor = decoded_processor_out.text

        logits_list = list(logits)
        decoder.reset_params(
            alpha=alpha,
            beta=beta,
            unk_score_offset=unk_score_offset,
            lm_score_boundary=lm_score_boundary,
        )

        with get_context("fork").Pool() as pool:
            decoded_decoder_out = decoder.decode_beams_batch(
                pool,
                logits_list,
            )

        decoded_decoder = [d[0][0] for d in decoded_decoder_out]

        self.assertListEqual(decoded_decoder, decoded_processor)
        self.assertListEqual(["<s> </s> <s> </s> </s>", "</s> </s> <s> </s> </s>"], decoded_processor)

        lm_model = processor.decoder.model_container[processor.decoder._model_key]
        self.assertEqual(lm_model.alpha, 2.0)
        self.assertEqual(lm_model.beta, 5.0)
        self.assertEqual(lm_model.unk_score_offset, -20.0)
        self.assertEqual(lm_model.score_boundary, True)

    def test_decoder_download_ignores_files(self):
        processor = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")

        language_model = processor.decoder.model_container[processor.decoder._model_key]
        path_to_cached_dir = Path(language_model._kenlm_model.path.decode("utf-8")).parent.parent.absolute()

        downloaded_decoder_files = os.listdir(path_to_cached_dir)
        expected_decoder_files = ["alphabet.json", "language_model"]

        downloaded_decoder_files.sort()
        expected_decoder_files.sort()

        # test that only decoder relevant files from
        # https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
        # are downloaded and none of the rest (e.g. README.md, ...)
        self.assertListEqual(downloaded_decoder_files, expected_decoder_files)

    def test_decoder_local_files(self):
        local_dir = snapshot_download("hf-internal-testing/processor_with_lm")
        processor = Wav2Vec2ProcessorWithLM.from_pretrained(local_dir)

        language_model = processor.decoder.model_container[processor.decoder._model_key]
        path_to_cached_dir = Path(language_model._kenlm_model.path.decode("utf-8")).parent.parent.absolute()

        local_decoder_files = os.listdir(local_dir)
        expected_decoder_files = os.listdir(path_to_cached_dir)

        local_decoder_files.sort()
        expected_decoder_files.sort()

        # test that both decoder form hub and local files in cache are the same
        self.assertListEqual(local_decoder_files, expected_decoder_files)

    def test_processor_from_auto_processor(self):
        processor_wav2vec2 = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
        processor_auto = AutoProcessor.from_pretrained("hf-internal-testing/processor_with_lm")

        raw_speech = floats_list((3, 1000))

        input_wav2vec2 = processor_wav2vec2(raw_speech, return_tensors="np")
        input_auto = processor_auto(raw_speech, return_tensors="np")

        for key in input_wav2vec2.keys():
            self.assertAlmostEqual(input_wav2vec2[key].sum(), input_auto[key].sum(), delta=1e-2)

        logits = self._get_dummy_logits()

        decoded_wav2vec2 = processor_wav2vec2.batch_decode(logits)
        decoded_auto = processor_auto.batch_decode(logits)

        self.assertListEqual(decoded_wav2vec2.text, decoded_auto.text)

    def test_model_input_names(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        self.assertListEqual(
            processor.model_input_names,
            feature_extractor.model_input_names,
            msg="`processor` and `feature_extractor` model input names do not match",
        )

    @staticmethod
    def get_from_offsets(offsets, key):
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list

    def test_offsets_integration_fast(self):
        processor = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
        logits = self._get_dummy_logits()[0]
        outputs = processor.decode(logits, output_word_offsets=True)

        # check Wav2Vec2CTCTokenizerOutput keys for word
        self.assertEqual(len(outputs.keys()), 4)
        self.assertTrue("text" in outputs)
        self.assertTrue("word_offsets" in outputs)
        self.assertTrue(isinstance(outputs, Wav2Vec2DecoderWithLMOutput))

        self.assertEqual(" ".join(self.get_from_offsets(outputs["word_offsets"], "word")), outputs.text)
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"], "word"), ["<s>", "<s>", "</s>"])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"], "start_offset"), [0, 2, 4])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"], "end_offset"), [1, 3, 5])

    def test_offsets_integration_fast_batch(self):
        processor = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
        logits = self._get_dummy_logits()
        outputs = processor.batch_decode(logits, output_word_offsets=True)

        # check Wav2Vec2CTCTokenizerOutput keys for word
        self.assertEqual(len(outputs.keys()), 4)
        self.assertTrue("text" in outputs)
        self.assertTrue("word_offsets" in outputs)
        self.assertTrue(isinstance(outputs, Wav2Vec2DecoderWithLMOutput))

        self.assertListEqual(
            [" ".join(self.get_from_offsets(o, "word")) for o in outputs["word_offsets"]], outputs.text
        )
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0], "word"), ["<s>", "<s>", "</s>"])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0], "start_offset"), [0, 2, 4])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0], "end_offset"), [1, 3, 5])

    @slow
    @require_torch
    @require_torchaudio
    def test_word_time_stamp_integration(self):
        import torch

        ds = load_dataset("common_voice", "en", split="train", streaming=True)
        ds = ds.cast_column("audio", datasets.Audio(sampling_rate=16000))
        ds_iter = iter(ds)
        sample = next(ds_iter)

        processor = AutoProcessor.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm")
        model = Wav2Vec2ForCTC.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm")

        # compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
        input_values = processor(sample["audio"]["array"], return_tensors="pt").input_values

        with torch.no_grad():
            logits = model(input_values).logits.cpu().numpy()

        output = processor.decode(logits[0], output_word_offsets=True)

        time_offset = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
        word_time_stamps = [
            {
                "start_time": d["start_offset"] * time_offset,
                "end_time": d["end_offset"] * time_offset,
                "word": d["word"],
            }
            for d in output["word_offsets"]
        ]

        EXPECTED_TEXT = "WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"

        # output words
        self.assertEqual(" ".join(self.get_from_offsets(word_time_stamps, "word")), EXPECTED_TEXT)
        self.assertEqual(" ".join(self.get_from_offsets(word_time_stamps, "word")), output.text)

        # output times
        start_times = torch.tensor(self.get_from_offsets(word_time_stamps, "start_time"))
        end_times = torch.tensor(self.get_from_offsets(word_time_stamps, "end_time"))

        # fmt: off
        expected_start_tensor = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599])
        expected_end_tensor = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94])
        # fmt: on

        self.assertTrue(torch.allclose(start_times, expected_start_tensor, atol=0.01))
        self.assertTrue(torch.allclose(end_times, expected_end_tensor, atol=0.01))
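# --- Usage sketch (illustrative, not part of the test file): batch decoding
# with an explicit multiprocessing pool, mirroring test_decoder_batch above.
# The random logits stand in for the (batch, time, vocab) output of a CTC model.
from multiprocessing import get_context

import numpy as np

from transformers import Wav2Vec2ProcessorWithLM

processor = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
logits = np.random.rand(2, 10, 16)  # stand-in for real Wav2Vec2ForCTC logits

# As the test notes, the pool must be created *after* the processor so the LM
# is available to the worker processes.
with get_context("fork").Pool() as pool:
    transcriptions = processor.batch_decode(logits, pool).text
print(transcriptions)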
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch
import torch.nn as nn

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer


@dataclass
class VQEncoderOutput(BaseOutput):
    latents: torch.FloatTensor


class VQModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 3,
        sample_size: int = 32,
        num_vq_embeddings: int = 256,
        norm_num_groups: int = 32,
        vq_embed_dim: Optional[int] = None,
        scaling_factor: float = 0.18215,
        norm_type: str = "group",
    ):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=False,
        )

        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels

        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            norm_type=norm_type,
        )

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> VQEncoderOutput:
        h = self.encoder(x)
        h = self.quant_conv(h)

        if not return_dict:
            return (h,)

        return VQEncoderOutput(latents=h)

    @apply_forward_hook
    def decode(
        self, h: torch.FloatTensor, force_not_quantize: bool = False, return_dict: bool = True
    ) -> Union[DecoderOutput, torch.FloatTensor]:
        # also go through quantization layer
        if not force_not_quantize:
            quant, emb_loss, info = self.quantize(h)
        else:
            quant = h
        quant2 = self.post_quant_conv(quant)
        dec = self.decoder(quant2, quant if self.config.norm_type == "spatial" else None)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    def forward(self, sample: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        x = sample
        h = self.encode(x).latents
        dec = self.decode(h).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
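# --- Usage sketch (illustrative, not part of the module): a round trip through
# a tiny randomly-initialized VQModel; the constructor arguments mirror the
# defaults above, so the spatial size is preserved end to end.
import torch

from diffusers import VQModel

model = VQModel(block_out_channels=(64,), latent_channels=3, num_vq_embeddings=256)
image = torch.randn(1, 3, 32, 32)

latents = model.encode(image).latents          # encoder + quant_conv (not yet quantized)
reconstruction = model.decode(latents).sample  # quantize + post_quant_conv + decoder
print(reconstruction.shape)                    # expected: torch.Size([1, 3, 32, 32])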
from ...processing_utils import ProcessorMixin


class WhisperProcessor(ProcessorMixin):
    feature_extractor_class = "WhisperFeatureExtractor"
    tokenizer_class = "WhisperTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    def get_prompt_ids(self, text, return_tensors="np"):
        return self.tokenizer.get_prompt_ids(text, return_tensors=return_tensors)
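# --- Usage sketch (illustrative, not part of the module): the processor routes
# audio to the feature extractor and text to the tokenizer; passing both
# attaches the tokenized text as `labels`, which is the training setup.
import numpy as np

from transformers import WhisperProcessor

processor = WhisperProcessor.from_pretrained("openai/whisper-tiny")
audio = np.zeros(16000, dtype=np.float32)  # one second of silence as a stand-in

features = processor(audio=audio, sampling_rate=16000, return_tensors="pt")
print(features.input_features.shape)  # log-mel features

batch = processor(audio=audio, sampling_rate=16000, text="hello world", return_tensors="pt")
print(sorted(batch.keys()))  # input_features plus labels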
import os

import numpy
import onnx


def _is_equal_tensor_proto(a, b):
    # Compare two initializers while temporarily blanking their names, so that
    # tensors with identical data but different names compare equal.
    name_a = a.name
    name_b = b.name

    a.name = ""
    b.name = ""

    res = a == b

    a.name = name_a
    b.name = name_b

    return res


def _node_replace_input_with(node_proto, name, new_name):
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)

    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)


def _graph_replace_input_with(graph_proto, name, new_name):
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)


def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i

        name_i = inits[i].name
        name_ref = inits[ref_i].name

        model_without_ext.graph.initializer.remove(inits[i])

        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)


def remove_dup_initializers(onnx_file_path):
    """
    Removes duplicate initializer tensors from an ONNX model and saves an
    optimized copy next to the original, prefixed with "optimized_".
    """
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)

    model = onnx.load(os.path.join(model_file_folder, model_file_name))

    inits = list(model.graph.initializer)

    dup_set = set()
    dup_map = {}
    ind_to_replace = []

    total_reduced_size = 0

    for i in range(len(inits)):
        if i in dup_set:
            continue

        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)

                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print("unexpected data type: ", dtype)
                total_reduced_size += mem_size

                name_i = inits[i].name
                name_j = inits[j].name

                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))

    print("total reduced size: ", total_reduced_size / 1024 / 1024 / 1024, "GB")

    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)

    optimized_model_file_name = "optimized_" + model_file_name
    new_model = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model)

    return new_model
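# --- Usage sketch (illustrative; "model.onnx" is a hypothetical path): dedupe
# identical initializer tensors in an exported graph and write the smaller copy
# next to the original.
optimized_path = remove_dup_initializers("model.onnx")
print("optimized model written to", optimized_path)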
import unittest

from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM


@require_torch
class TrOCRStandaloneDecoderModelTester:
    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        d_model=16,
        decoder_seq_length=7,
        is_training=True,
        is_decoder=True,
        use_attention_mask=True,
        use_cache=False,
        use_labels=True,
        decoder_start_token_id=2,
        decoder_ffn_dim=32,
        decoder_layers=4,
        decoder_attention_heads=4,
        max_position_embeddings=30,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels

        self.vocab_size = vocab_size
        self.d_model = d_model
        self.hidden_size = d_model
        self.num_hidden_layers = decoder_layers
        self.decoder_layers = decoder_layers
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_attention_heads = decoder_attention_heads
        self.num_attention_heads = decoder_attention_heads
        self.eos_token_id = eos_token_id
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.use_cache = use_cache
        self.max_position_embeddings = max_position_embeddings

        self.scope = None
        self.decoder_key_length = decoder_seq_length
        self.base_model_out_len = 2
        self.decoder_attention_idx = 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2)

        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        config = TrOCRConfig(
            vocab_size=self.vocab_size,
            d_model=self.d_model,
            decoder_layers=self.decoder_layers,
            decoder_ffn_dim=self.decoder_ffn_dim,
            decoder_attention_heads=self.decoder_attention_heads,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            use_cache=self.use_cache,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
            max_position_embeddings=self.max_position_embeddings,
        )

        return (config, input_ids, attention_mask, lm_labels)

    def create_and_check_decoder_model_past(
        self,
        config,
        input_ids,
        attention_mask,
        lm_labels,
    ):
        config.use_cache = True
        model = TrOCRDecoder(config=config).to(torch_device).eval()
        input_ids = input_ids[:2]

        input_ids[input_ids == 0] += 1
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        past_key_values = outputs["past_key_values"]

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((2, 1), config.vocab_size - 1) + 1

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask, lm_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict


@require_torch
class TrOCRStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (TrOCRForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = {"text-generation": TrOCRForCausalLM} if is_torch_available() else {}
    fx_compatible = True
    test_pruning = False

    def setUp(self):
        self.model_tester = TrOCRStandaloneDecoderModelTester(self, is_training=False)
        self.config_tester = ConfigTester(self, config_class=TrOCRConfig)

    # not implemented currently
    def test_inputs_embeds(self):
        pass

    # trocr has no base model
    def test_save_load_fast_init_from_base(self):
        pass

    # trocr has no base model
    def test_save_load_fast_init_to_base(self):
        pass

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past(*config_and_inputs)

    # decoder cannot keep gradients
    def test_retain_grad_hidden_states_attentions(self):
        return

    @unittest.skip("The model doesn't support left padding")  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        pass
import argparse
import importlib
from pathlib import Path


# Test all the extensions added in the setup
FILES_TO_FIND = [
    "kernels/rwkv/wkv_cuda.cu",
    "kernels/rwkv/wkv_op.cpp",
    "kernels/deformable_detr/ms_deform_attn.h",
    "kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh",
    "models/graphormer/algos_graphormer.pyx",
]


def test_custom_files_are_present(transformers_path):
    for file in FILES_TO_FIND:
        if not (transformers_path / file).exists():
            return False
    return True


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_lib", action="store_true", help="Whether to check the build or the actual package.")
    args = parser.parse_args()

    if args.check_lib:
        transformers_module = importlib.import_module("transformers")
        transformers_path = Path(transformers_module.__file__).parent
    else:
        transformers_path = Path.cwd() / "build/lib/transformers"

    if not test_custom_files_are_present(transformers_path):
        raise ValueError("The built release does not contain the custom files. Fix this before going further!")
import argparse
import json

import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download

from transformers import (
    VideoMAEConfig,
    VideoMAEForPreTraining,
    VideoMAEForVideoClassification,
    VideoMAEImageProcessor,
)


def get_videomae_config(model_name):
    config = VideoMAEConfig()

    set_architecture_configs(model_name, config)

    if "finetuned" not in model_name:
        config.use_mean_pooling = False

    if "finetuned" in model_name:
        repo_id = "huggingface/label-files"
        if "kinetics" in model_name:
            config.num_labels = 400
            filename = "kinetics400-id2label.json"
        elif "ssv2" in model_name:
            config.num_labels = 174
            filename = "something-something-v2-id2label.json"
        else:
            raise ValueError("Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned.")
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    return config


def set_architecture_configs(model_name, config):
    if "small" in model_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 3
        config.decoder_hidden_size = 192
        config.decoder_intermediate_size = 768
    elif "large" in model_name:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 512
        config.decoder_intermediate_size = 2048
    elif "huge" in model_name:
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 640
        config.decoder_intermediate_size = 2560
    elif "base" not in model_name:
        raise ValueError('Model name should include either "small", "base", "large", or "huge"')


def rename_key(name):
    if "encoder." in name:
        name = name.replace("encoder.", "")
    if "cls_token" in name:
        name = name.replace("cls_token", "videomae.embeddings.cls_token")
    if "decoder_pos_embed" in name:
        name = name.replace("decoder_pos_embed", "decoder.decoder_pos_embed")
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("pos_embed", "videomae.embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "videomae.embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "videomae.embeddings.norm")
    if "decoder.blocks" in name:
        name = name.replace("decoder.blocks", "decoder.decoder_layers")
    if "blocks" in name:
        name = name.replace("blocks", "videomae.encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name and "bias" not in name:
        name = name.replace("attn", "attention.self")
    if "attn" in name:
        name = name.replace("attn", "attention.attention")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "decoder_embed" in name:
        name = name.replace("decoder_embed", "decoder.decoder_embed")
    if "decoder_norm" in name:
        name = name.replace("decoder_norm", "decoder.decoder_norm")
    if "decoder_pred" in name:
        name = name.replace("decoder_pred", "decoder.decoder_pred")
    if "norm.weight" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.weight", "videomae.layernorm.weight")
    if "norm.bias" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.bias", "videomae.layernorm.bias")
    if "head" in name and "decoder" not in name:
        name = name.replace("head", "classifier")

    return name


def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if key.startswith("encoder."):
            key = key.replace("encoder.", "")

        if "qkv" in key:
            # the fused qkv projection is split into separate query, key and value weights
            key_split = key.split(".")
            if key.startswith("decoder.blocks"):
                dim = config.decoder_hidden_size
                layer_num = int(key_split[2])
                prefix = "decoder.decoder_layers."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                dim = config.hidden_size
                layer_num = int(key_split[1])
                prefix = "videomae.encoder.layer."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict


def prepare_video():
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(file)
    return list(video)


def convert_videomae_checkpoint(checkpoint_url, pytorch_dump_folder_path, model_name, push_to_hub):
    config = get_videomae_config(model_name)

    if "finetuned" in model_name:
        model = VideoMAEForVideoClassification(config)
    else:
        model = VideoMAEForPreTraining(config)

    # download original checkpoint, hosted on Google Drive
    output = "pytorch_model.bin"
    gdown.cached_download(checkpoint_url, output, quiet=False)
    files = torch.load(output, map_location="cpu")
    if "model" in files:
        state_dict = files["model"]
    else:
        state_dict = files["module"]
    new_state_dict = convert_state_dict(state_dict, config)

    model.load_state_dict(new_state_dict)
    model.eval()

    # verify model on basic input
    image_processor = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
    video = prepare_video()
    inputs = image_processor(video, return_tensors="pt")

    if "finetuned" not in model_name:
        local_path = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos", filename="bool_masked_pos.pt")
        inputs["bool_masked_pos"] = torch.load(local_path)

    outputs = model(**inputs)
    logits = outputs.logits

    model_names = [
        "videomae-small-finetuned-kinetics",
        "videomae-small-finetuned-ssv2",
        # Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
        "videomae-base-short",
        "videomae-base-short-finetuned-kinetics",
        "videomae-base",
        "videomae-base-finetuned-kinetics",
        "videomae-large",
        "videomae-large-finetuned-kinetics",
        "videomae-huge-finetuned-kinetics",
        # Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
        "videomae-base-short-ssv2",
        "videomae-base-short-finetuned-ssv2",
        "videomae-base-ssv2",
        "videomae-base-finetuned-ssv2",
    ]

    # NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
    if model_name == "videomae-small-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([-0.9291, -0.4061, -0.9307])
    elif model_name == "videomae-small-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([0.2671, -0.4689, -0.8235])
    elif model_name == "videomae-base":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.7739, 0.7968, 0.7089], [0.6701, 0.7487, 0.6209], [0.4287, 0.5158, 0.4773]])
    elif model_name == "videomae-base-short":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]])
        # we verified the loss both for normalized and unnormalized targets for this one
        expected_loss = torch.tensor([0.5142]) if config.norm_pix_loss else torch.tensor([0.6469])
    elif model_name == "videomae-large":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.7149, 0.7997, 0.6966], [0.6768, 0.7869, 0.6948], [0.5139, 0.6221, 0.5605]])
    elif model_name == "videomae-large-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.0771, 0.0011, -0.3625])
    elif model_name == "videomae-huge-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.2433, 0.1632, -0.4894])
    elif model_name == "videomae-base-short-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.6588, 0.0990, -0.2493])
    elif model_name == "videomae-base-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.3669, -0.0688, -0.2421])
    elif model_name == "videomae-base-short-ssv2":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.4712, 0.5296, 0.5786], [0.2278, 0.2729, 0.4026], [0.0352, 0.0730, 0.2506]])
    elif model_name == "videomae-base-short-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([-0.0537, -0.1539, -0.3266])
    elif model_name == "videomae-base-ssv2":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.8131, 0.8727, 0.8546], [0.7366, 0.9377, 0.8870], [0.5935, 0.8874, 0.8564]])
    elif model_name == "videomae-base-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([0.1961, -0.8337, -0.6389])
    else:
        raise ValueError(f"Model name not supported. Should be one of {model_names}")

    # verify logits
    assert logits.shape == expected_shape
    if "finetuned" in model_name:
        assert torch.allclose(logits[0, :3], expected_slice, atol=1e-4)
    else:
        print("Logits:", logits[0, :3, :3])
        assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4)
    print("Logits ok!")

    # verify loss, if applicable
    if model_name == "videomae-base-short":
        loss = outputs.loss
        assert torch.allclose(loss, expected_loss, atol=1e-4)
        print("Loss ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)
        model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        model.push_to_hub(model_name, organization="nielsr")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default=(
            "https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4"
        ),
        type=str,
        help=(
            "URL of the original PyTorch checkpoint (on Google Drive) you'd like to convert. Should be a direct"
            " download link."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="/Users/nielsrogge/Documents/VideoMAE/Test",
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument("--model_name", default="videomae-base", type=str, help="Name of the model.")
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
from __future__ import annotations


def rec_insertion_sort(collection: list, n: int) -> None:
    # Checks if the entire collection has been sorted
    if len(collection) <= 1 or n <= 1:
        return

    insert_next(collection, n - 1)
    rec_insertion_sort(collection, n - 1)


def insert_next(collection: list, index: int) -> None:
    # Checks that the element is in the correct position relative to its predecessor
    if index >= len(collection) or collection[index - 1] <= collection[index]:
        return

    # Swaps adjacent elements since they are not in ascending order
    collection[index - 1], collection[index] = (
        collection[index],
        collection[index - 1],
    )

    insert_next(collection, index + 1)


if __name__ == "__main__":
    numbers = input("Enter integers separated by spaces: ")
    number_list = [int(num) for num in numbers.split()]
    rec_insertion_sort(number_list, len(number_list))
    print(number_list)
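# --- Quick sanity check (illustrative, not part of the script above): the sort
# works in place, so the list itself is reordered.
data = [5, 3, 1, 4, 2]
rec_insertion_sort(data, len(data))
assert data == [1, 2, 3, 4, 5]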
import json
import os
import shutil
import tempfile
from unittest import TestCase

from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available


if is_torch_available() and is_datasets_available() and is_faiss_available():
    from transformers.models.rag.configuration_rag import RagConfig
    from transformers.models.rag.tokenization_rag import RagTokenizer


@require_faiss
@require_torch
class RagTokenizerTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8

        # DPR tok
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer")
        os.makedirs(dpr_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        # BART tok
        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low",
            "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer")
        os.makedirs(bart_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_bart_tokenizer(self) -> BartTokenizer:
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    @require_tokenizers
    def test_save_load_pretrained_with_saved_config(self):
        save_dir = os.path.join(self.tmpdirname, "rag_tokenizer")
        rag_config = RagConfig(question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict())
        rag_tokenizer = RagTokenizer(question_encoder=self.get_dpr_tokenizer(), generator=self.get_bart_tokenizer())
        rag_config.save_pretrained(save_dir)
        rag_tokenizer.save_pretrained(save_dir)
        new_rag_tokenizer = RagTokenizer.from_pretrained(save_dir, config=rag_config)
        self.assertIsInstance(new_rag_tokenizer.question_encoder, DPRQuestionEncoderTokenizerFast)
        self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab(), rag_tokenizer.question_encoder.get_vocab())
        self.assertIsInstance(new_rag_tokenizer.generator, BartTokenizerFast)
        self.assertEqual(new_rag_tokenizer.generator.get_vocab(), rag_tokenizer.generator.get_vocab())

    @slow
    def test_pretrained_token_nq_tokenizer(self):
        tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
        input_strings = [
            "who got the first nobel prize in physics",
            "when is the next deadpool movie being released",
            "which mode is used for short wave broadcast service",
            "who is the owner of reading football club",
            "when is the next scandal episode coming out",
            "when is the last time the philadelphia won the superbowl",
            "what is the most current adobe flash player version",
            "how many episodes are there in dragon ball z",
            "what is the first step in the evolution of the eye",
            "where is gall bladder situated in human body",
            "what is the main mineral in lithium batteries",
            "who is the president of usa right now",
            "where do the greasers live in the outsiders",
            "panda is a national animal of which country",
            "what is the name of manchester united stadium",
        ]
        input_dict = tokenizer(input_strings)
        self.assertIsNotNone(input_dict)

    @slow
    def test_pretrained_sequence_nq_tokenizer(self):
        tokenizer = RagTokenizer.from_pretrained("facebook/rag-sequence-nq")
        input_strings = [
            "who got the first nobel prize in physics",
            "when is the next deadpool movie being released",
            "which mode is used for short wave broadcast service",
            "who is the owner of reading football club",
            "when is the next scandal episode coming out",
            "when is the last time the philadelphia won the superbowl",
            "what is the most current adobe flash player version",
            "how many episodes are there in dragon ball z",
            "what is the first step in the evolution of the eye",
            "where is gall bladder situated in human body",
            "what is the main mineral in lithium batteries",
            "who is the president of usa right now",
            "where do the greasers live in the outsiders",
            "panda is a national animal of which country",
            "what is the name of manchester united stadium",
        ]
        input_dict = tokenizer(input_strings)
        self.assertIsNotNone(input_dict)
def split(string: str, separator: str = " ") -> list:
    """
    Split `string` on every occurrence of `separator` (defaults to spaces).

    >>> split("apple#banana#cherry#orange", separator="#")
    ['apple', 'banana', 'cherry', 'orange']
    """
    split_words = []

    last_index = 0
    for index, char in enumerate(string):
        if char == separator:
            split_words.append(string[last_index:index])
            last_index = index + 1
        elif index + 1 == len(string):
            split_words.append(string[last_index : index + 1])
    return split_words


if __name__ == "__main__":
    from doctest import testmod

    testmod()
def find_min(arr: list) -> int:
    """
    Partition `arr` into two subsets whose sums are as close as possible and
    return the minimum difference between the two subset sums.
    """
    n = len(arr)
    s = sum(arr)

    # dp[i][j] is True when some subset of the first i elements sums to j
    dp = [[False for x in range(s + 1)] for y in range(n + 1)]

    # the empty subset always sums to 0
    for i in range(n + 1):
        dp[i][0] = True

    for i in range(1, n + 1):
        for j in range(1, s + 1):
            # either skip the i-th element ...
            dp[i][j] = dp[i - 1][j]

            # ... or include it, if it fits
            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]

    diff = s
    for j in range(int(s / 2), -1, -1):
        if dp[n][j] is True:
            diff = s - 2 * j
            break

    return diff
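# --- Worked example (illustrative, not part of the module): for [1, 6, 11, 5]
# the best split is {1, 5, 6} (sum 12) versus {11} (sum 11), so the minimum
# difference is 1.
print(find_min([1, 6, 11, 5]))  # 1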
import csv from collections import defaultdict from dataclasses import dataclass, field from typing import List, Optional import matplotlib.pyplot as plt import numpy as np from matplotlib.ticker import ScalarFormatter from transformers import HfArgumentParser def _lowerCamelCase( lowercase__=None , lowercase__=None ) -> Dict: '''simple docstring''' return field(default_factory=lambda: default , metadata=lowercase__ ) @dataclass class A : UpperCamelCase_ : str =field( metadata={'''help''': '''The csv file to plot.'''} , ) UpperCamelCase_ : bool =field( default=A_ , metadata={'''help''': '''Whether to plot along batch size or sequence length. Defaults to sequence length.'''} , ) UpperCamelCase_ : bool =field( default=A_ , metadata={'''help''': '''Whether the csv file has time results or memory results. Defaults to memory results.'''} , ) UpperCamelCase_ : bool =field( default=A_ , metadata={'''help''': '''Disable logarithmic scale when plotting'''} , ) UpperCamelCase_ : bool =field( default=A_ , metadata={ '''help''': '''Whether the csv file has training results or inference results. Defaults to inference results.''' } , ) UpperCamelCase_ : Optional[str] =field( default=A_ , metadata={'''help''': '''Filename under which the plot will be saved. If unused no plot is saved.'''} , ) UpperCamelCase_ : Optional[List[str]] =list_field( default=A_ , metadata={'''help''': '''List of model names that are used instead of the ones in the csv file.'''} ) def _lowerCamelCase( lowercase__ ) -> int: '''simple docstring''' try: int(lowercase__ ) return True except ValueError: return False def _lowerCamelCase( lowercase__ ) -> int: '''simple docstring''' try: float(lowercase__ ) return True except ValueError: return False class A : def __init__(self , lowerCAmelCase ): __lowercase= args __lowercase= defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}} ) with open(self.args.csv_file , newline='' ) as csv_file: __lowercase= csv.DictReader(lowerCAmelCase ) for row in reader: __lowercase= row['model'] self.result_dict[model_name]["bsz"].append(int(row['batch_size'] ) ) self.result_dict[model_name]["seq_len"].append(int(row['sequence_length'] ) ) if can_convert_to_int(row['result'] ): # value is not None __lowercase= int(row['result'] ) elif can_convert_to_float(row['result'] ): # value is not None __lowercase= float(row['result'] ) def _A (self ): __lowercase, __lowercase= plt.subplots() __lowercase= 'Time usage' if self.args.is_time else 'Memory usage' __lowercase= title_str + ' for training' if self.args.is_train else title_str + ' for inference' if not self.args.no_log_scale: # set logarithm scales ax.set_xscale('log' ) ax.set_yscale('log' ) for axis in [ax.xaxis, ax.yaxis]: axis.set_major_formatter(ScalarFormatter() ) for model_name_idx, model_name in enumerate(self.result_dict.keys() ): __lowercase= sorted(set(self.result_dict[model_name]['bsz'] ) ) __lowercase= sorted(set(self.result_dict[model_name]['seq_len'] ) ) __lowercase= self.result_dict[model_name]['result'] ((__lowercase), (__lowercase))= ( (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes) ) __lowercase= ( model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx] ) for inner_loop_value in inner_loop_array: if self.args.plot_along_batch: __lowercase= np.asarray( [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results] , dtype=lowerCAmelCase , ) else: __lowercase= np.asarray( [results[(inner_loop_value, x)] for x in 
x_axis_array if (inner_loop_value, x) in results] , dtype=np.floataa , ) ((__lowercase), (__lowercase))= ( ('batch_size', 'len') if self.args.plot_along_batch else ('in #tokens', 'bsz') ) __lowercase= np.asarray(lowerCAmelCase , lowerCAmelCase )[: len(lowerCAmelCase )] plt.scatter( lowerCAmelCase , lowerCAmelCase , label=f'{label_model_name} - {inner_loop_label}: {inner_loop_value}' ) plt.plot(lowerCAmelCase , lowerCAmelCase , '--' ) title_str += f' {label_model_name} vs.' __lowercase= title_str[:-4] __lowercase= 'Time in s' if self.args.is_time else 'Memory in MB' # plot plt.title(lowerCAmelCase ) plt.xlabel(lowerCAmelCase ) plt.ylabel(lowerCAmelCase ) plt.legend() if self.args.figure_png_file is not None: plt.savefig(self.args.figure_png_file ) else: plt.show() def _lowerCamelCase( ) -> str: '''simple docstring''' __lowercase= HfArgumentParser(lowercase__ ) __lowercase= parser.parse_args_into_dataclasses()[0] __lowercase= Plot(args=lowercase__ ) plot.plot() if __name__ == "__main__": main()
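# A hypothetical invocation sketch (the script file name is assumed; the flags
# mirror the dataclass fields above, and the CSV must provide the `model`,
# `batch_size`, `sequence_length` and `result` columns consumed by the reader):
#
#     python plot_csv_file.py --csv_file results.csv --figure_png_file usage.png
#     python plot_csv_file.py --csv_file results.csv --plot_along_batch --is_time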
import os
import unittest

from huggingface_hub.utils import are_progress_bars_disabled

import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar


class LoggingTest(unittest.TestCase):
    def test_set_level(self):
        logger = logging.get_logger()

        # the current default level is logging.WARNING
        level_origin = logging.get_verbosity()

        logging.set_verbosity_error()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_warning()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_info()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_debug()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        # restore to the original level
        logging.set_verbosity(level_origin)

    def test_integration(self):
        level_origin = logging.get_verbosity()

        logger = logging.get_logger("transformers.models.bart.tokenization_bart")
        msg = "Testing 1, 2, 3"

        # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
        if level_origin <= logging.WARNING:
            with CaptureLogger(logger) as cl:
                logger.warning(msg)
            self.assertEqual(cl.out, msg + "\n")

        # this is setting the level for all of `transformers.*` loggers
        logging.set_verbosity_error()

        # should not be able to log warnings
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out, "")

        # should be able to log warnings again
        logging.set_verbosity_warning()
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out, msg + "\n")

        # restore to the original level
        logging.set_verbosity(level_origin)

    @mockenv(TRANSFORMERS_VERBOSITY="error")
    def test_env_override(self):
        # reset for the env var to take effect, next time some logger call is made
        transformers.utils.logging._reset_library_root_logger()

        # this action activates the env var
        logger = logging.get_logger("transformers.models.bart.tokenization_bart")

        env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
        env_level = logging.log_levels[env_level_str]

        current_level = logging.get_verbosity()
        self.assertEqual(
            env_level,
            current_level,
            f"TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}",
        )

        # restore to the original level
        os.environ["TRANSFORMERS_VERBOSITY"] = ""
        transformers.utils.logging._reset_library_root_logger()

    @mockenv(TRANSFORMERS_VERBOSITY="super-error")
    def test_env_invalid_override(self):
        # reset for the env var to take effect, next time some logger call is made
        transformers.utils.logging._reset_library_root_logger()
        logger = logging.logging.getLogger()

        with CaptureLogger(logger) as cl:
            # this action activates the env var
            logging.get_logger("transformers.models.bart.tokenization_bart")
        self.assertIn("Unknown option TRANSFORMERS_VERBOSITY=super-error", cl.out)

        # no need to restore as nothing was changed

    def test_advisory_warnings(self):
        # testing `logger.warning_advice()`
        transformers.utils.logging._reset_library_root_logger()

        logger = logging.get_logger("transformers.models.bart.tokenization_bart")
        msg = "Testing 1, 2, 3"

        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="1"):
            # nothing should be logged as env var disables this method
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out, "")

        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS=""):
            # should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out, msg + "\n")


def test_set_progress_bar_enabled():
    disable_progress_bar()
    assert are_progress_bars_disabled()

    enable_progress_bar()
    assert not are_progress_bars_disabled()
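# A minimal sketch of the verbosity API the tests above exercise; these calls
# affect every logger under the `transformers.*` namespace:
#
#     from transformers.utils import logging as hf_logging
#
#     hf_logging.set_verbosity_info()
#     logger = hf_logging.get_logger(__name__)
#     logger.info("now visible")
#     logger.warning_advice("suppressible via TRANSFORMERS_NO_ADVISORY_WARNINGS=1")
#     hf_logging.set_verbosity_warning()  # back to the library default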
import collections from typing import List, Optional, Union from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging from ..bert.tokenization_bert_fast import BertTokenizerFast from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer lowerCAmelCase = logging.get_logger(__name__) lowerCAmelCase = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''} lowerCAmelCase = { '''vocab_file''': { '''facebook/dpr-ctx_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt''' ), '''facebook/dpr-ctx_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''facebook/dpr-ctx_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json''' ), '''facebook/dpr-ctx_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json''' ), }, } lowerCAmelCase = { '''vocab_file''': { '''facebook/dpr-question_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt''' ), '''facebook/dpr-question_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''facebook/dpr-question_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json''' ), '''facebook/dpr-question_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json''' ), }, } lowerCAmelCase = { '''vocab_file''': { '''facebook/dpr-reader-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt''' ), '''facebook/dpr-reader-multiset-base''': ( '''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''facebook/dpr-reader-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json''' ), '''facebook/dpr-reader-multiset-base''': ( '''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json''' ), }, } lowerCAmelCase = { '''facebook/dpr-ctx_encoder-single-nq-base''': 5_1_2, '''facebook/dpr-ctx_encoder-multiset-base''': 5_1_2, } lowerCAmelCase = { '''facebook/dpr-question_encoder-single-nq-base''': 5_1_2, '''facebook/dpr-question_encoder-multiset-base''': 5_1_2, } lowerCAmelCase = { '''facebook/dpr-reader-single-nq-base''': 5_1_2, '''facebook/dpr-reader-multiset-base''': 5_1_2, } lowerCAmelCase = { '''facebook/dpr-ctx_encoder-single-nq-base''': {'''do_lower_case''': True}, '''facebook/dpr-ctx_encoder-multiset-base''': {'''do_lower_case''': True}, } lowerCAmelCase = { '''facebook/dpr-question_encoder-single-nq-base''': {'''do_lower_case''': True}, '''facebook/dpr-question_encoder-multiset-base''': {'''do_lower_case''': True}, } lowerCAmelCase = { '''facebook/dpr-reader-single-nq-base''': {'''do_lower_case''': True}, '''facebook/dpr-reader-multiset-base''': {'''do_lower_case''': True}, } class A ( A_ ): UpperCamelCase_ : List[Any] =VOCAB_FILES_NAMES UpperCamelCase_ : Dict =CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP UpperCamelCase_ : List[Any] 
=CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase_ : Optional[int] =CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION UpperCamelCase_ : int =DPRContextEncoderTokenizer class A ( A_ ): UpperCamelCase_ : Any =VOCAB_FILES_NAMES UpperCamelCase_ : List[str] =QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP UpperCamelCase_ : Optional[Any] =QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase_ : Optional[Any] =QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION UpperCamelCase_ : List[Any] =DPRQuestionEncoderTokenizer lowerCAmelCase = collections.namedtuple( '''DPRSpanPrediction''', ['''span_score''', '''relevance_score''', '''doc_id''', '''start_index''', '''end_index''', '''text'''] ) lowerCAmelCase = collections.namedtuple('''DPRReaderOutput''', ['''start_logits''', '''end_logits''', '''relevance_logits''']) lowerCAmelCase = R''' Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`. It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers), using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)` with the format: [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids> Args: questions (`str` or `List[str]`): The questions to be encoded. You can specify one question for many passages. In this case, the question will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in `titles` or `texts`. titles (`str` or `List[str]`): The passages titles to be encoded. This can be a string or a list of strings if there are several passages. texts (`str` or `List[str]`): The passages texts to be encoded. This can be a string or a list of strings if there are several passages. padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`): Activates and controls padding. Accepts the following values: - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different lengths). truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`): Activates and controls truncation. Accepts the following values: - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided. - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided. 
- `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size). max_length (`int`, *optional*): Controls the maximum length to use by one of the truncation/padding parameters. If left unset or set to `None`, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated. return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors instead of list of python integers. Acceptable values are: - `\'tf\'`: Return TensorFlow `tf.constant` objects. - `\'pt\'`: Return PyTorch `torch.Tensor` objects. - `\'np\'`: Return Numpy `np.ndarray` objects. return_attention_mask (`bool`, *optional*): Whether or not to return the attention mask. If not set, will return the attention mask according to the specific tokenizer\'s default, defined by the `return_outputs` attribute. [What are attention masks?](../glossary#attention-mask) Return: `Dict[str, List[List[int]]]`: A dictionary with the following keys: - `input_ids`: List of token ids to be fed to a model. - `attention_mask`: List of indices specifying which tokens should be attended to by the model. ''' @add_start_docstrings(A_ ) class A : def __call__(self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = False , lowerCAmelCase = False , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , **lowerCAmelCase , ): if titles is None and texts is None: return super().__call__( lowerCAmelCase , padding=lowerCAmelCase , truncation=lowerCAmelCase , max_length=lowerCAmelCase , return_tensors=lowerCAmelCase , return_attention_mask=lowerCAmelCase , **lowerCAmelCase , ) elif titles is None or texts is None: __lowercase= titles if texts is None else texts return super().__call__( lowerCAmelCase , lowerCAmelCase , padding=lowerCAmelCase , truncation=lowerCAmelCase , max_length=lowerCAmelCase , return_tensors=lowerCAmelCase , return_attention_mask=lowerCAmelCase , **lowerCAmelCase , ) __lowercase= titles if not isinstance(lowerCAmelCase , lowerCAmelCase ) else [titles] __lowercase= texts if not isinstance(lowerCAmelCase , lowerCAmelCase ) else [texts] __lowercase= len(lowerCAmelCase ) __lowercase= questions if not isinstance(lowerCAmelCase , lowerCAmelCase ) else [questions] * n_passages assert len(lowerCAmelCase ) == len( lowerCAmelCase ), f'There should be as many titles than texts but got {len(lowerCAmelCase )} titles and {len(lowerCAmelCase )} texts.' 
__lowercase= super().__call__(lowerCAmelCase , lowerCAmelCase , padding=lowerCAmelCase , truncation=lowerCAmelCase )['input_ids'] __lowercase= super().__call__(lowerCAmelCase , add_special_tokens=lowerCAmelCase , padding=lowerCAmelCase , truncation=lowerCAmelCase )['input_ids'] __lowercase= { 'input_ids': [ (encoded_question_and_title + encoded_text)[:max_length] if max_length is not None and truncation else encoded_question_and_title + encoded_text for encoded_question_and_title, encoded_text in zip(lowerCAmelCase , lowerCAmelCase ) ] } if return_attention_mask is not False: __lowercase= [] for input_ids in encoded_inputs["input_ids"]: attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] ) __lowercase= attention_mask return self.pad(lowerCAmelCase , padding=lowerCAmelCase , max_length=lowerCAmelCase , return_tensors=lowerCAmelCase ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = 1_6 , lowerCAmelCase = 6_4 , lowerCAmelCase = 4 , ): __lowercase= reader_input['input_ids'] __lowercase, __lowercase, __lowercase= reader_output[:3] __lowercase= len(lowerCAmelCase ) __lowercase= sorted(range(lowerCAmelCase ) , reverse=lowerCAmelCase , key=relevance_logits.__getitem__ ) __lowercase= [] for doc_id in sorted_docs: __lowercase= list(input_ids[doc_id] ) # assuming question & title information is at the beginning of the sequence __lowercase= sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id if sequence_ids[-1] == self.pad_token_id: __lowercase= sequence_ids.index(self.pad_token_id ) else: __lowercase= len(lowerCAmelCase ) __lowercase= self._get_best_spans( start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=lowerCAmelCase , top_spans=lowerCAmelCase , ) for start_index, end_index in best_spans: start_index += passage_offset end_index += passage_offset nbest_spans_predictions.append( DPRSpanPrediction( span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=lowerCAmelCase , start_index=lowerCAmelCase , end_index=lowerCAmelCase , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) ) if len(lowerCAmelCase ) >= num_spans: break return nbest_spans_predictions[:num_spans] def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ): __lowercase= [] for start_index, start_score in enumerate(lowerCAmelCase ): for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ): scores.append(((start_index, start_index + answer_length), start_score + end_score) ) __lowercase= sorted(lowerCAmelCase , key=lambda lowerCAmelCase : x[1] , reverse=lowerCAmelCase ) __lowercase= [] for (start_index, end_index), score in scores: assert start_index <= end_index, f'Wrong span indices: [{start_index}:{end_index}]' __lowercase= end_index - start_index + 1 assert length <= max_answer_length, f'Span is too long: {length} > {max_answer_length}' if any( start_index <= prev_start_index <= prev_end_index <= end_index or prev_start_index <= start_index <= end_index <= prev_end_index for (prev_start_index, prev_end_index) in chosen_span_intervals ): continue chosen_span_intervals.append((start_index, end_index) ) if len(lowerCAmelCase ) == top_spans: break return chosen_span_intervals @add_end_docstrings(A_ ) class A ( A_ , A_ ): UpperCamelCase_ : Optional[int] =VOCAB_FILES_NAMES UpperCamelCase_ : List[str] 
=READER_PRETRAINED_VOCAB_FILES_MAP UpperCamelCase_ : Dict =READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase_ : Optional[Any] =READER_PRETRAINED_INIT_CONFIGURATION UpperCamelCase_ : Union[str, Any] =['''input_ids''', '''attention_mask'''] UpperCamelCase_ : Dict =DPRReaderTokenizer
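# Sketch of the combined question/title/text call implemented above (checkpoint
# name taken from this file's pretrained maps; output follows the
# `(n_passages, sequence_length)` layout described in the docstring above):
#
#     from transformers import DPRReaderTokenizer
#
#     tokenizer = DPRReaderTokenizer.from_pretrained("facebook/dpr-reader-single-nq-base")
#     encoded = tokenizer(
#         questions=["What does DPR stand for?"],
#         titles=["Dense Passage Retrieval"],
#         texts=["Dense Passage Retrieval (DPR) is a method for open-domain QA."],
#         return_tensors="pt",
#     )
#     # feed `encoded` to a DPR reader model, then pass the model output to
#     # `tokenizer.decode_best_spans(...)` to extract answer spans.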
def counting_sort(collection: list) -> list:
    """Stable counting sort; returns a new, ordered list."""
    if collection == []:
        return []

    # get some information about the collection
    coll_len = len(collection)
    coll_max = max(collection)
    coll_min = min(collection)

    # create the counting array
    counting_arr_length = coll_max + 1 - coll_min
    counting_arr = [0] * counting_arr_length

    # count how often each number appears in the collection
    for number in collection:
        counting_arr[number - coll_min] += 1

    # sum each position with its predecessors. now, counting_arr[i] tells
    # us how many elements <= i are in the collection
    for i in range(1, counting_arr_length):
        counting_arr[i] = counting_arr[i] + counting_arr[i - 1]

    # create the output collection
    ordered = [0] * coll_len

    # place the elements in the output, respecting the original order (stable
    # sort) from end to begin, updating counting_arr
    for i in reversed(range(0, coll_len)):
        ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i]
        counting_arr[collection[i] - coll_min] -= 1

    return ordered


def counting_sort_string(string: str) -> str:
    return "".join([chr(i) for i in counting_sort([ord(c) for c in string])])


if __name__ == "__main__":
    # Test string sort
    assert counting_sort_string("thisisthestring") == "eghhiiinrsssttt"

    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(counting_sort(unsorted))
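# The coll_min offset above makes the sort work for negative integers too, a
# property worth noting since plain counting sort is often written for
# non-negative keys only:
#
#     counting_sort([-5, -10, 0, -3, 8, 5, -1, 10])
#     # -> [-10, -5, -3, -1, 0, 5, 8, 10]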
import inspect import unittest import torch import torch.nn as nn from accelerate.hooks import ( AlignDevicesHook, ModelHook, SequentialHook, add_hook_to_module, attach_align_device_hook, remove_hook_from_module, remove_hook_from_submodules, ) from accelerate.test_utils import require_multi_gpu class A ( nn.Module ): def __init__(self ): super().__init__() __lowercase= nn.Linear(3 , 4 ) __lowercase= nn.BatchNormad(4 ) __lowercase= nn.Linear(4 , 5 ) def _A (self , lowerCAmelCase ): return self.lineara(self.batchnorm(self.lineara(lowerCAmelCase ) ) ) class A ( A_ ): def _A (self , lowerCAmelCase , *lowerCAmelCase , **lowerCAmelCase ): return (args[0] + 1,) + args[1:], kwargs class A ( A_ ): def _A (self , lowerCAmelCase , lowerCAmelCase ): return output + 1 class A ( unittest.TestCase ): def _A (self ): __lowercase= ModelForTest() __lowercase= ModelHook() add_hook_to_module(lowerCAmelCase , lowerCAmelCase ) self.assertEqual(test_model._hf_hook , lowerCAmelCase ) self.assertTrue(hasattr(lowerCAmelCase , '_old_forward' ) ) # Check adding the hook did not change the name or the signature self.assertEqual(test_model.forward.__name__ , 'forward' ) self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ['x'] ) remove_hook_from_module(lowerCAmelCase ) self.assertFalse(hasattr(lowerCAmelCase , '_hf_hook' ) ) self.assertFalse(hasattr(lowerCAmelCase , '_old_forward' ) ) def _A (self ): __lowercase= ModelForTest() __lowercase= ModelHook() add_hook_to_module(lowerCAmelCase , lowerCAmelCase ) add_hook_to_module(lowerCAmelCase , lowerCAmelCase , append=lowerCAmelCase ) self.assertEqual(isinstance(test_model._hf_hook , lowerCAmelCase ) , lowerCAmelCase ) self.assertEqual(len(test_model._hf_hook.hooks ) , 2 ) self.assertTrue(hasattr(lowerCAmelCase , '_old_forward' ) ) # Check adding the hook did not change the name or the signature self.assertEqual(test_model.forward.__name__ , 'forward' ) self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ['x'] ) remove_hook_from_module(lowerCAmelCase ) self.assertFalse(hasattr(lowerCAmelCase , '_hf_hook' ) ) self.assertFalse(hasattr(lowerCAmelCase , '_old_forward' ) ) def _A (self ): __lowercase= ModelForTest() __lowercase= torch.randn(2 , 3 ) __lowercase= test_model(x + 1 ) __lowercase= test_model(x + 2 ) __lowercase= PreForwardHook() add_hook_to_module(lowerCAmelCase , lowerCAmelCase ) __lowercase= test_model(lowerCAmelCase ) self.assertTrue(torch.allclose(lowerCAmelCase , lowerCAmelCase , atol=1E-5 ) ) # Attaching a hook to a model when it already has one replaces, does not chain __lowercase= PreForwardHook() add_hook_to_module(lowerCAmelCase , lowerCAmelCase ) __lowercase= test_model(lowerCAmelCase ) self.assertTrue(torch.allclose(lowerCAmelCase , lowerCAmelCase , atol=1E-5 ) ) # You need to use the sequential hook to chain two or more hooks __lowercase= SequentialHook(PreForwardHook() , PreForwardHook() ) add_hook_to_module(lowerCAmelCase , lowerCAmelCase ) __lowercase= test_model(lowerCAmelCase ) assert torch.allclose(lowerCAmelCase , lowerCAmelCase , atol=1E-5 ) def _A (self ): __lowercase= ModelForTest() __lowercase= torch.randn(2 , 3 ) __lowercase= test_model(lowerCAmelCase ) __lowercase= PostForwardHook() add_hook_to_module(lowerCAmelCase , lowerCAmelCase ) __lowercase= test_model(lowerCAmelCase ) self.assertTrue(torch.allclose(lowerCAmelCase , output + 1 , atol=1E-5 ) ) # Attaching a hook to a model when it already has one replaces, does not chain __lowercase= PostForwardHook() 
add_hook_to_module(lowerCAmelCase , lowerCAmelCase ) __lowercase= test_model(lowerCAmelCase ) self.assertTrue(torch.allclose(lowerCAmelCase , output + 1 , atol=1E-5 ) ) # You need to use the sequential hook to chain two or more hooks __lowercase= SequentialHook(PostForwardHook() , PostForwardHook() ) add_hook_to_module(lowerCAmelCase , lowerCAmelCase ) __lowercase= test_model(lowerCAmelCase ) assert torch.allclose(lowerCAmelCase , output + 2 , atol=1E-5 ) def _A (self ): __lowercase= ModelForTest() __lowercase= torch.randn(2 , 3 ) __lowercase= test_model(lowerCAmelCase ) __lowercase= PostForwardHook() add_hook_to_module(lowerCAmelCase , lowerCAmelCase ) __lowercase= test_model(lowerCAmelCase ) self.assertTrue(torch.allclose(lowerCAmelCase , output + 1 ) ) self.assertTrue(outputa.requires_grad ) __lowercase= True __lowercase= test_model(lowerCAmelCase ) self.assertFalse(outputa.requires_grad ) @require_multi_gpu def _A (self ): __lowercase= ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) # This will move each submodule on different devices add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) ) add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) ) add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) ) self.assertEqual(model.lineara.weight.device , torch.device(0 ) ) self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) ) self.assertEqual(model.lineara.weight.device , torch.device(1 ) ) # We can still make a forward pass. The input does not need to be on any particular device __lowercase= torch.randn(2 , 3 ) __lowercase= model(lowerCAmelCase ) self.assertEqual(output.device , torch.device(1 ) ) # We can add a general hook to put back output on same device as input. 
add_hook_to_module(lowerCAmelCase , AlignDevicesHook(io_same_device=lowerCAmelCase ) ) __lowercase= torch.randn(2 , 3 ).to(0 ) __lowercase= model(lowerCAmelCase ) self.assertEqual(output.device , torch.device(0 ) ) def _A (self ): __lowercase= ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) # This will move each submodule on different devices __lowercase= {'execution_device': 0 if torch.cuda.is_available() else 'cpu', 'offload': True} add_hook_to_module(model.lineara , AlignDevicesHook(**lowerCAmelCase ) ) add_hook_to_module(model.batchnorm , AlignDevicesHook(**lowerCAmelCase ) ) add_hook_to_module(model.lineara , AlignDevicesHook(**lowerCAmelCase ) ) # Parameters have been offloaded, so on the meta device self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) ) self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) # Buffers are not included in the offload by default, so are on the execution device __lowercase= torch.device(hook_kwargs['execution_device'] ) self.assertEqual(model.batchnorm.running_mean.device , lowerCAmelCase ) __lowercase= torch.randn(2 , 3 ) __lowercase= model(lowerCAmelCase ) self.assertEqual(output.device , lowerCAmelCase ) # Removing hooks loads back the weights in the model. remove_hook_from_module(model.lineara ) remove_hook_from_module(model.batchnorm ) remove_hook_from_module(model.lineara ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) # Now test with buffers included in the offload __lowercase= { 'execution_device': 0 if torch.cuda.is_available() else 'cpu', 'offload': True, 'offload_buffers': True, } add_hook_to_module(model.lineara , AlignDevicesHook(**lowerCAmelCase ) ) add_hook_to_module(model.batchnorm , AlignDevicesHook(**lowerCAmelCase ) ) add_hook_to_module(model.lineara , AlignDevicesHook(**lowerCAmelCase ) ) # Parameters have been offloaded, so on the meta device, buffers included self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) ) self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device('meta' ) ) __lowercase= torch.randn(2 , 3 ) __lowercase= model(lowerCAmelCase ) self.assertEqual(output.device , lowerCAmelCase ) # Removing hooks loads back the weights in the model. 
remove_hook_from_module(model.lineara ) remove_hook_from_module(model.batchnorm ) remove_hook_from_module(model.lineara ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) def _A (self ): __lowercase= ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) # This will move each submodule on different devices __lowercase= 0 if torch.cuda.is_available() else 'cpu' attach_align_device_hook(lowerCAmelCase , execution_device=lowerCAmelCase , offload=lowerCAmelCase ) # Parameters have been offloaded, so on the meta device self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) ) self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) # Buffers are not included in the offload by default, so are on the execution device __lowercase= torch.device(lowerCAmelCase ) self.assertEqual(model.batchnorm.running_mean.device , lowerCAmelCase ) __lowercase= torch.randn(2 , 3 ) __lowercase= model(lowerCAmelCase ) self.assertEqual(output.device , lowerCAmelCase ) # Removing hooks loads back the weights in the model. remove_hook_from_submodules(lowerCAmelCase ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) # Now test with buffers included in the offload attach_align_device_hook(lowerCAmelCase , execution_device=lowerCAmelCase , offload=lowerCAmelCase , offload_buffers=lowerCAmelCase ) # Parameters have been offloaded, so on the meta device, buffers included self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) ) self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device('meta' ) ) __lowercase= torch.randn(2 , 3 ) __lowercase= model(lowerCAmelCase ) self.assertEqual(output.device , lowerCAmelCase ) # Removing hooks loads back the weights in the model. 
remove_hook_from_submodules(lowerCAmelCase ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) def _A (self ): __lowercase= ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) # This will move each submodule on different devices __lowercase= 0 if torch.cuda.is_available() else 'cpu' attach_align_device_hook( lowerCAmelCase , execution_device=lowerCAmelCase , offload=lowerCAmelCase , weights_map=model.state_dict() ) # Parameters have been offloaded, so on the meta device self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) ) self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) # Buffers are not included in the offload by default, so are on the execution device __lowercase= torch.device(lowerCAmelCase ) self.assertEqual(model.batchnorm.running_mean.device , lowerCAmelCase ) __lowercase= torch.randn(2 , 3 ) __lowercase= model(lowerCAmelCase ) self.assertEqual(output.device , lowerCAmelCase ) # Removing hooks loads back the weights in the model. remove_hook_from_submodules(lowerCAmelCase ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) # Now test with buffers included in the offload attach_align_device_hook( lowerCAmelCase , execution_device=lowerCAmelCase , offload=lowerCAmelCase , weights_map=model.state_dict() , offload_buffers=lowerCAmelCase , ) # Parameters have been offloaded, so on the meta device, buffers included self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) ) self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device('meta' ) ) __lowercase= torch.randn(2 , 3 ) __lowercase= model(lowerCAmelCase ) self.assertEqual(output.device , lowerCAmelCase ) # Removing hooks loads back the weights in the model. remove_hook_from_submodules(lowerCAmelCase ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
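# Minimal sketch of the hook API the tests above exercise, assuming only the
# symbols already imported at the top of this file plus torch.nn; the
# pre_forward override mirrors the PreForwardHook used in the tests:
#
#     import torch
#     import torch.nn as nn
#     from accelerate.hooks import ModelHook, add_hook_to_module, remove_hook_from_module
#
#     class AddOneToInput(ModelHook):
#         def pre_forward(self, module, *args, **kwargs):
#             # rewrite the positional args before the wrapped forward runs
#             return (args[0] + 1,) + args[1:], kwargs
#
#     layer = nn.Linear(3, 4)
#     add_hook_to_module(layer, AddOneToInput())
#     out = layer(torch.randn(2, 3))   # the forward sees x + 1
#     remove_hook_from_module(layer)   # restores the original forward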
import json import os from functools import lru_cache from typing import List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging lowerCAmelCase = logging.get_logger(__name__) lowerCAmelCase = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''} # See all BART models at https://huggingface.co/models?filter=bart lowerCAmelCase = { '''vocab_file''': { '''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/vocab.json''', '''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/vocab.json''', '''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json''', '''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json''', '''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json''', '''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json''', }, '''merges_file''': { '''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/merges.txt''', '''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/merges.txt''', '''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt''', '''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt''', '''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt''', '''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt''', }, } lowerCAmelCase = { '''facebook/bart-base''': 1_0_2_4, '''facebook/bart-large''': 1_0_2_4, '''facebook/bart-large-mnli''': 1_0_2_4, '''facebook/bart-large-cnn''': 1_0_2_4, '''facebook/bart-large-xsum''': 1_0_2_4, '''yjernite/bart_eli5''': 1_0_2_4, } @lru_cache() def _lowerCamelCase( ) -> Dict: '''simple docstring''' __lowercase= ( list(range(ord('!' 
) , ord('~' ) + 1 ) ) + list(range(ord('¡' ) , ord('¬' ) + 1 ) ) + list(range(ord('®' ) , ord('ÿ' ) + 1 ) ) ) __lowercase= bs[:] __lowercase= 0 for b in range(2**8 ): if b not in bs: bs.append(lowercase__ ) cs.append(2**8 + n ) n += 1 __lowercase= [chr(lowercase__ ) for n in cs] return dict(zip(lowercase__ , lowercase__ ) ) def _lowerCamelCase( lowercase__ ) -> Optional[Any]: '''simple docstring''' __lowercase= set() __lowercase= word[0] for char in word[1:]: pairs.add((prev_char, char) ) __lowercase= char return pairs class A ( A_ ): UpperCamelCase_ : Dict =VOCAB_FILES_NAMES UpperCamelCase_ : List[str] =PRETRAINED_VOCAB_FILES_MAP UpperCamelCase_ : str =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase_ : List[str] =['''input_ids''', '''attention_mask'''] def __init__(self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase="replace" , lowerCAmelCase="<s>" , lowerCAmelCase="</s>" , lowerCAmelCase="</s>" , lowerCAmelCase="<s>" , lowerCAmelCase="<unk>" , lowerCAmelCase="<pad>" , lowerCAmelCase="<mask>" , lowerCAmelCase=False , **lowerCAmelCase , ): __lowercase= AddedToken(lowerCAmelCase , lstrip=lowerCAmelCase , rstrip=lowerCAmelCase ) if isinstance(lowerCAmelCase , lowerCAmelCase ) else bos_token __lowercase= AddedToken(lowerCAmelCase , lstrip=lowerCAmelCase , rstrip=lowerCAmelCase ) if isinstance(lowerCAmelCase , lowerCAmelCase ) else eos_token __lowercase= AddedToken(lowerCAmelCase , lstrip=lowerCAmelCase , rstrip=lowerCAmelCase ) if isinstance(lowerCAmelCase , lowerCAmelCase ) else sep_token __lowercase= AddedToken(lowerCAmelCase , lstrip=lowerCAmelCase , rstrip=lowerCAmelCase ) if isinstance(lowerCAmelCase , lowerCAmelCase ) else cls_token __lowercase= AddedToken(lowerCAmelCase , lstrip=lowerCAmelCase , rstrip=lowerCAmelCase ) if isinstance(lowerCAmelCase , lowerCAmelCase ) else unk_token __lowercase= AddedToken(lowerCAmelCase , lstrip=lowerCAmelCase , rstrip=lowerCAmelCase ) if isinstance(lowerCAmelCase , lowerCAmelCase ) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it __lowercase= AddedToken(lowerCAmelCase , lstrip=lowerCAmelCase , rstrip=lowerCAmelCase ) if isinstance(lowerCAmelCase , lowerCAmelCase ) else mask_token super().__init__( errors=lowerCAmelCase , bos_token=lowerCAmelCase , eos_token=lowerCAmelCase , unk_token=lowerCAmelCase , sep_token=lowerCAmelCase , cls_token=lowerCAmelCase , pad_token=lowerCAmelCase , mask_token=lowerCAmelCase , add_prefix_space=lowerCAmelCase , **lowerCAmelCase , ) with open(lowerCAmelCase , encoding='utf-8' ) as vocab_handle: __lowercase= json.load(lowerCAmelCase ) __lowercase= {v: k for k, v in self.encoder.items()} __lowercase= errors # how to handle errors in decoding __lowercase= bytes_to_unicode() __lowercase= {v: k for k, v in self.byte_encoder.items()} with open(lowerCAmelCase , encoding='utf-8' ) as merges_handle: __lowercase= merges_handle.read().split('\n' )[1:-1] __lowercase= [tuple(merge.split() ) for merge in bpe_merges] __lowercase= dict(zip(lowerCAmelCase , range(len(lowerCAmelCase ) ) ) ) __lowercase= {} __lowercase= add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions __lowercase= re.compile(r'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' ) @property def _A (self ): return len(self.encoder ) def _A (self ): return dict(self.encoder , **self.added_tokens_encoder ) def _A (self , lowerCAmelCase ): if token in self.cache: return self.cache[token] __lowercase= tuple(lowerCAmelCase ) __lowercase= get_pairs(lowerCAmelCase ) if not pairs: return token while True: __lowercase= min(lowerCAmelCase , key=lambda lowerCAmelCase : self.bpe_ranks.get(lowerCAmelCase , float('inf' ) ) ) if bigram not in self.bpe_ranks: break __lowercase, __lowercase= bigram __lowercase= [] __lowercase= 0 while i < len(lowerCAmelCase ): try: __lowercase= word.index(lowerCAmelCase , lowerCAmelCase ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) __lowercase= j if word[i] == first and i < len(lowerCAmelCase ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 __lowercase= tuple(lowerCAmelCase ) __lowercase= new_word if len(lowerCAmelCase ) == 1: break else: __lowercase= get_pairs(lowerCAmelCase ) __lowercase= ' '.join(lowerCAmelCase ) __lowercase= word return word def _A (self , lowerCAmelCase ): __lowercase= [] for token in re.findall(self.pat , lowerCAmelCase ): __lowercase= ''.join( self.byte_encoder[b] for b in token.encode('utf-8' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCAmelCase ).split(' ' ) ) return bpe_tokens def _A (self , lowerCAmelCase ): return self.encoder.get(lowerCAmelCase , self.encoder.get(self.unk_token ) ) def _A (self , lowerCAmelCase ): return self.decoder.get(lowerCAmelCase ) def _A (self , lowerCAmelCase ): __lowercase= ''.join(lowerCAmelCase ) __lowercase= bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' , errors=self.errors ) return text def _A (self , lowerCAmelCase , lowerCAmelCase = None ): if not os.path.isdir(lowerCAmelCase ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return __lowercase= os.path.join( lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) __lowercase= os.path.join( lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + 
VOCAB_FILES_NAMES['merges_file'] ) with open(lowerCAmelCase , 'w' , encoding='utf-8' ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCAmelCase , ensure_ascii=lowerCAmelCase ) + '\n' ) __lowercase= 0 with open(lowerCAmelCase , 'w' , encoding='utf-8' ) as writer: writer.write('#version: 0.2\n' ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowerCAmelCase : kv[1] ): if index != token_index: logger.warning( f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.' ' Please check that the tokenizer is not corrupted!' ) __lowercase= token_index writer.write(' '.join(lowerCAmelCase ) + '\n' ) index += 1 return vocab_file, merge_file def _A (self , lowerCAmelCase , lowerCAmelCase = None ): if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] __lowercase= [self.cls_token_id] __lowercase= [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def _A (self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowerCAmelCase , token_ids_a=lowerCAmelCase , already_has_special_tokens=lowerCAmelCase ) if token_ids_a is None: return [1] + ([0] * len(lowerCAmelCase )) + [1] return [1] + ([0] * len(lowerCAmelCase )) + [1, 1] + ([0] * len(lowerCAmelCase )) + [1] def _A (self , lowerCAmelCase , lowerCAmelCase = None ): __lowercase= [self.sep_token_id] __lowercase= [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _A (self , lowerCAmelCase , lowerCAmelCase=False , **lowerCAmelCase ): __lowercase= kwargs.pop('add_prefix_space' , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(lowerCAmelCase ) > 0 and not text[0].isspace()): __lowercase= ' ' + text return (text, kwargs)
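# Usage sketch for the byte-level BPE tokenizer above (checkpoint from this
# file's pretrained maps; the 'Ġ' marker is the encoded leading space produced
# by bytes_to_unicode):
#
#     from transformers import BartTokenizer
#
#     tokenizer = BartTokenizer.from_pretrained("facebook/bart-base")
#     ids = tokenizer("Hello world")["input_ids"]
#     print(tokenizer.convert_ids_to_tokens(ids))
#     # e.g. ['<s>', 'Hello', 'Ġworld', '</s>']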
import argparse import json import os import re import torch from transformers import BloomConfig, BloomModel from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME from transformers.utils import logging logging.set_verbosity_info() lowerCAmelCase = [ '''word_embeddings_layernorm.weight''', '''word_embeddings_layernorm.bias''', '''input_layernorm.weight''', '''input_layernorm.bias''', '''post_attention_layernorm.weight''', '''post_attention_layernorm.bias''', '''self_attention.dense.bias''', '''mlp.dense_4h_to_h.bias''', '''ln_f.weight''', '''ln_f.bias''', ] lowerCAmelCase = [ '''mlp.dense_4h_to_h.weight''', '''self_attention.dense.weight''', ] def _lowerCamelCase( lowercase__ , lowercase__ ) -> str: '''simple docstring''' __lowercase= { 'word_embeddings.weight': 'word_embeddings.weight', 'word_embeddings.norm.weight': 'word_embeddings_layernorm.weight', 'word_embeddings.norm.bias': 'word_embeddings_layernorm.bias', 'weight': 'ln_f.weight', 'bias': 'ln_f.bias', } if key in layer_rename_map: return layer_rename_map[key] # Handle transformer blocks __lowercase= int(re.match(R'.*layer_(\d*).*' , lowercase__ )[1] ) layer_number -= 3 return F'h.{layer_number}.' + key def _lowerCamelCase( lowercase__ ) -> List[str]: '''simple docstring''' if dtype == torch.bool: return 1 / 8 __lowercase= re.search(R'[^\d](\d+)$' , str(lowercase__ ) ) if bit_search is None: raise ValueError(F'`dtype` is not a valid dtype: {dtype}.' ) __lowercase= int(bit_search.groups()[0] ) return bit_size // 8 def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) -> Optional[Any]: '''simple docstring''' if bloom_config_file == "": __lowercase= BloomConfig() else: __lowercase= BloomConfig.from_json_file(lowercase__ ) if shard_model: __lowercase= os.listdir(lowercase__ ) __lowercase= sorted(filter(lambda lowercase__ : s.startswith('layer' ) and "model_00" in s , lowercase__ ) ) __lowercase= {'weight_map': {}, 'metadata': {}} __lowercase= 0 __lowercase= None __lowercase= BloomConfig() for j, file in enumerate(lowercase__ ): print('Processing file: {}'.format(lowercase__ ) ) __lowercase= None for i in range(lowercase__ ): # load all TP files __lowercase= file.replace('model_00' , F'model_0{i}' ) __lowercase= torch.load(os.path.join(lowercase__ , lowercase__ ) , map_location='cpu' ) # Rename keys in the transformers names __lowercase= list(temp.keys() ) for key in keys: __lowercase= temp.pop(lowercase__ ) if tensors is None: __lowercase= temp else: for key in tensors.keys(): if any(key.endswith(lowercase__ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ): # We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425) tensors[key] += temp[key] else: # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel __lowercase= 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0 # We concatenate these weights accross TP ranks __lowercase= torch.cat([tensors[key], temp[key]] , dim=lowercase__ ) # Divide by the number of TP the weights we want to average for key in tensors.keys(): if any(key.endswith(lowercase__ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ): __lowercase= tensors[key] / pretraining_tp torch.save( lowercase__ , os.path.join( lowercase__ , 'pytorch_model_{}-of-{}.bin'.format(str(j + 1 ).zfill(5 ) , str(len(lowercase__ ) ).zfill(5 ) ) , ) , ) for key in tensors.keys(): __lowercase= tensors[key] total_size += value.numel() 
* get_dtype_size(value.dtype ) if key not in index_dict["weight_map"]: __lowercase= 'pytorch_model_{}-of-{}.bin'.format( str(j + 1 ).zfill(5 ) , str(len(lowercase__ ) ).zfill(5 ) ) __lowercase= BloomConfig() __lowercase= pytorch_dump_folder_path + '/' + CONFIG_NAME __lowercase= total_size with open(lowercase__ , 'w' , encoding='utf-8' ) as f: f.write(config.to_json_string() ) with open(os.path.join(lowercase__ , WEIGHTS_NAME + '.index.json' ) , 'w' , encoding='utf-8' ) as f: __lowercase= json.dumps(lowercase__ , indent=2 , sort_keys=lowercase__ ) + '\n' f.write(lowercase__ ) else: __lowercase= BloomModel(lowercase__ ) __lowercase= os.listdir(lowercase__ ) __lowercase= sorted(filter(lambda lowercase__ : s.startswith('layer' ) and "model_00" in s , lowercase__ ) ) __lowercase= None for i, file in enumerate(lowercase__ ): __lowercase= None for i in range(lowercase__ ): # load all TP files __lowercase= file.replace('model_00' , F'model_0{i}' ) __lowercase= torch.load(os.path.join(lowercase__ , lowercase__ ) , map_location='cpu' ) # Rename keys in the transformers names __lowercase= list(temp.keys() ) for key in keys: __lowercase= temp.pop(lowercase__ ) if tensors is None: __lowercase= temp else: for key in tensors.keys(): # We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425) if any(key.endswith(lowercase__ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ): tensors[key] += temp[key] else: # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel __lowercase= 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0 # We concatenate these weights accross TP ranks __lowercase= torch.cat([tensors[key], temp[key]] , dim=lowercase__ ) # Divide by the number of TP the weights we want to average for key in tensors.keys(): if any(key.endswith(lowercase__ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ): __lowercase= tensors[key] / pretraining_tp __lowercase= model.load_state_dict(lowercase__ , strict=lowercase__ ) assert not other_keys.unexpected_keys, F'The keys {other_keys.unexpected_keys} are unexpected' if missing_keys is None: __lowercase= set(other_keys.missing_keys ) else: __lowercase= missing_keys.intersection(set(other_keys.missing_keys ) ) assert not missing_keys, F'The keys {missing_keys} are missing' # Save pytorch-model os.makedirs(lowercase__ , exist_ok=lowercase__ ) __lowercase= pytorch_dump_folder_path + '/' + WEIGHTS_NAME __lowercase= pytorch_dump_folder_path + '/' + CONFIG_NAME print(F'Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}' ) if config.torch_dtype is not None: __lowercase= model.to(config.torch_dtype ) torch.save(model.state_dict() , lowercase__ ) print(F'Save configuration file to {pytorch_config_dump_path}' ) with open(lowercase__ , 'w' , encoding='utf-8' ) as f: f.write(config.to_json_string() ) if __name__ == "__main__": lowerCAmelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--bloom_checkpoint_path''', default=None, type=str, required=True, help='''Path to the Megatron-LM checkpoint path.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) parser.add_argument( '''--bloom_config_file''', default='''''', type=str, help=( '''An optional config json file corresponding to the pre-trained model. 
\n''' '''This specifies the model architecture.''' ), ) parser.add_argument( '''--shard_model''', action='''store_true''', help='''An optional setting to shard the output model \nThis enables sharding the converted checkpoint''', ) parser.add_argument( '''--pretraining_tp''', default=4, type=int, help='''Pretraining TP rank that has been used when training the model in Megatron-LM \n''', ) lowerCAmelCase = parser.parse_args() convert_bloom_checkpoint_to_pytorch( args.bloom_checkpoint_path, args.bloom_config_file, args.pytorch_dump_folder_path, args.shard_model, args.pretraining_tp, )
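Once converted, the dump folder behaves like any local checkpoint. A minimal sketch, with "./bloom-converted" standing in as a hypothetical value for --pytorch_dump_folder_path:

import torch
from transformers import BloomModel

# Hypothetical output folder from the script above; from_pretrained also
# resolves the sharded pytorch_model_*.bin + .index.json layout produced
# by the --shard_model path.
model = BloomModel.from_pretrained("./bloom-converted", torch_dtype=torch.bfloat16)
print(model.config)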
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging lowerCAmelCase = '''▁''' lowerCAmelCase = {'''vocab_file''': '''spiece.model'''} lowerCAmelCase = { '''vocab_file''': {'''google/pegasus-xsum''': '''https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'''} } lowerCAmelCase = { '''google/pegasus-xsum''': 5_1_2, } lowerCAmelCase = logging.get_logger(__name__) class A ( A_ ): UpperCamelCase_ : Union[str, Any] =VOCAB_FILES_NAMES UpperCamelCase_ : List[Any] =VOCAB_FILES_NAMES UpperCamelCase_ : int =PRETRAINED_VOCAB_FILES_MAP UpperCamelCase_ : Tuple =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase_ : int =['''input_ids''', '''attention_mask'''] def __init__(self , lowerCAmelCase , lowerCAmelCase="<pad>" , lowerCAmelCase="</s>" , lowerCAmelCase="<unk>" , lowerCAmelCase="<mask_2>" , lowerCAmelCase="<mask_1>" , lowerCAmelCase=None , lowerCAmelCase=1_0_3 , lowerCAmelCase = None , **lowerCAmelCase , ): __lowercase= offset if additional_special_tokens is not None: if not isinstance(lowerCAmelCase , lowerCAmelCase ): raise TypeError( f'additional_special_tokens should be of type {type(lowerCAmelCase )}, but is' f' {type(lowerCAmelCase )}' ) __lowercase= ( ([mask_token_sent] + additional_special_tokens) if mask_token_sent not in additional_special_tokens and mask_token_sent is not None else additional_special_tokens ) # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken additional_special_tokens_extended += [ f'<unk_{i}>' for i in range(len(lowerCAmelCase ) , self.offset - 1 ) ] if len(set(lowerCAmelCase ) ) != len(lowerCAmelCase ): raise ValueError( 'Please make sure that the provided additional_special_tokens do not contain an incorrectly' f' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.' 
) __lowercase= additional_special_tokens_extended else: __lowercase= [mask_token_sent] if mask_token_sent is not None else [] additional_special_tokens += [f'<unk_{i}>' for i in range(2 , self.offset )] __lowercase= {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( eos_token=lowerCAmelCase , unk_token=lowerCAmelCase , mask_token=lowerCAmelCase , pad_token=lowerCAmelCase , mask_token_sent=lowerCAmelCase , offset=lowerCAmelCase , additional_special_tokens=lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **lowerCAmelCase , ) __lowercase= mask_token_sent __lowercase= vocab_file __lowercase= spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(lowerCAmelCase ) # add special tokens to encoder dict __lowercase= { 0: self.pad_token, 1: self.eos_token, } if self.mask_token_sent is not None: self.encoder.update( { 2: self.mask_token_sent, 3: self.mask_token, } ) if self.offset > 0: # entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102 # mask_token_sent is already added to list -> so start at 1 self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1 , self.offset - 1 )} ) __lowercase= {v: k for k, v in self.encoder.items()} @property def _A (self ): return len(self.sp_model ) + self.offset def _A (self ): __lowercase= {self.convert_ids_to_tokens(lowerCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__(self ): __lowercase= self.__dict__.copy() __lowercase= None return state def __setstate__(self , lowerCAmelCase ): __lowercase= d # for backward compatibility if not hasattr(self , 'sp_model_kwargs' ): __lowercase= {} __lowercase= spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def _A (self , lowerCAmelCase ): return self.sp_model.encode(lowerCAmelCase , out_type=lowerCAmelCase ) def _A (self , lowerCAmelCase ): if token in self.decoder: return self.decoder[token] elif token in self.added_tokens_decoder: return self.added_tokens_decoder[token] __lowercase= self.sp_model.piece_to_id(lowerCAmelCase ) return sp_id + self.offset def _A (self , lowerCAmelCase ): if index in self.encoder: return self.encoder[index] elif index in self.added_tokens_encoder: return self.added_tokens_encoder[index] else: __lowercase= self.sp_model.IdToPiece(index - self.offset ) return token def _A (self , lowerCAmelCase ): __lowercase= [] __lowercase= '' for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(lowerCAmelCase ) + token __lowercase= [] else: current_sub_tokens.append(lowerCAmelCase ) out_string += self.sp_model.decode(lowerCAmelCase ) return out_string.strip() def _A (self , lowerCAmelCase=False ): return 1 def _A (self , lowerCAmelCase ): __lowercase= set(self.all_special_ids ) # call it once instead of inside list comp all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special return [1 if x in all_special_ids else 0 for x in seq] def _A (self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = False ): if already_has_special_tokens: return self._special_token_mask(lowerCAmelCase ) elif token_ids_a is None: return self._special_token_mask(lowerCAmelCase ) + [1] else: return self._special_token_mask(token_ids_a + token_ids_a ) + [1] def _A (self , lowerCAmelCase , lowerCAmelCase=None ): if token_ids_a is None: return token_ids_a + [self.eos_token_id] # We don't expect to 
process pairs, but leave the pair logic for API consistency return token_ids_a + token_ids_a + [self.eos_token_id] def _A (self , lowerCAmelCase , lowerCAmelCase = None ): if not os.path.isdir(lowerCAmelCase ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return __lowercase= os.path.join( lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , lowerCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(lowerCAmelCase , 'wb' ) as fi: __lowercase= self.sp_model.serialized_model_proto() fi.write(lowerCAmelCase ) return (out_vocab_file,)
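The offset logic above reserves the low ids (pad, eos, mask tokens, unk_2..unk_102) so that raw sentencepiece ids are shifted by `offset`. A quick sketch of the resulting behavior, assuming network access to the google/pegasus-xsum checkpoint referenced above:

from transformers import PegasusTokenizer

tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")
enc = tokenizer("PEGASUS is a summarization model.")
print(tokenizer.pad_token_id)  # 0, per the encoder dict above
print(enc.input_ids[-1] == tokenizer.eos_token_id)  # True; </s> (id 1) is appended
print(tokenizer.convert_ids_to_tokens(enc.input_ids))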
import gc import random import unittest import numpy as np import torch from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline from diffusers.pipelines.shap_e import ShapERenderer from diffusers.utils import floats_tensor, load_image, load_numpy, slow from diffusers.utils.testing_utils import require_torch_gpu, torch_device from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference class A ( A_ , unittest.TestCase ): UpperCamelCase_ : Optional[int] =ShapEImgaImgPipeline UpperCamelCase_ : int =['''image'''] UpperCamelCase_ : int =['''image'''] UpperCamelCase_ : Union[str, Any] =[ '''num_images_per_prompt''', '''num_inference_steps''', '''generator''', '''latents''', '''guidance_scale''', '''frame_size''', '''output_type''', '''return_dict''', ] UpperCamelCase_ : Optional[Any] =False @property def _A (self ): return 3_2 @property def _A (self ): return 3_2 @property def _A (self ): return self.time_input_dim * 4 @property def _A (self ): return 8 @property def _A (self ): torch.manual_seed(0 ) __lowercase= CLIPVisionConfig( hidden_size=self.text_embedder_hidden_size , image_size=6_4 , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , ) __lowercase= CLIPVisionModel(lowerCAmelCase ) return model @property def _A (self ): __lowercase= CLIPImageProcessor( crop_size=2_2_4 , do_center_crop=lowerCAmelCase , do_normalize=lowerCAmelCase , do_resize=lowerCAmelCase , image_mean=[0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73] , image_std=[0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11] , resample=3 , size=2_2_4 , ) return image_processor @property def _A (self ): torch.manual_seed(0 ) __lowercase= { 'num_attention_heads': 2, 'attention_head_dim': 1_6, 'embedding_dim': self.time_input_dim, 'num_embeddings': 3_2, 'embedding_proj_dim': self.text_embedder_hidden_size, 'time_embed_dim': self.time_embed_dim, 'num_layers': 1, 'clip_embed_dim': self.time_input_dim * 2, 'additional_embeddings': 0, 'time_embed_act_fn': 'gelu', 'norm_in_type': 'layer', 'embedding_proj_norm_type': 'layer', 'encoder_hid_proj_type': None, 'added_emb_type': None, } __lowercase= PriorTransformer(**lowerCAmelCase ) return model @property def _A (self ): torch.manual_seed(0 ) __lowercase= { 'param_shapes': ( (self.renderer_dim, 9_3), (self.renderer_dim, 8), (self.renderer_dim, 8), (self.renderer_dim, 8), ), 'd_latent': self.time_input_dim, 'd_hidden': self.renderer_dim, 'n_output': 1_2, 'background': ( 0.1, 0.1, 0.1, ), } __lowercase= ShapERenderer(**lowerCAmelCase ) return model def _A (self ): __lowercase= self.dummy_prior __lowercase= self.dummy_image_encoder __lowercase= self.dummy_image_processor __lowercase= self.dummy_renderer __lowercase= HeunDiscreteScheduler( beta_schedule='exp' , num_train_timesteps=1_0_2_4 , prediction_type='sample' , use_karras_sigmas=lowerCAmelCase , clip_sample=lowerCAmelCase , clip_sample_range=1.0 , ) __lowercase= { 'prior': prior, 'image_encoder': image_encoder, 'image_processor': image_processor, 'renderer': renderer, 'scheduler': scheduler, } return components def _A (self , lowerCAmelCase , lowerCAmelCase=0 ): __lowercase= floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(lowerCAmelCase ) ).to(lowerCAmelCase ) if str(lowerCAmelCase ).startswith('mps' ): __lowercase= torch.manual_seed(lowerCAmelCase ) else: __lowercase= torch.Generator(device=lowerCAmelCase 
).manual_seed(lowerCAmelCase ) __lowercase= { 'image': input_image, 'generator': generator, 'num_inference_steps': 1, 'frame_size': 3_2, 'output_type': 'np', } return inputs def _A (self ): __lowercase= 'cpu' __lowercase= self.get_dummy_components() __lowercase= self.pipeline_class(**lowerCAmelCase ) __lowercase= pipe.to(lowerCAmelCase ) pipe.set_progress_bar_config(disable=lowerCAmelCase ) __lowercase= pipe(**self.get_dummy_inputs(lowerCAmelCase ) ) __lowercase= output.images[0] __lowercase= image[0, -3:, -3:, -1] assert image.shape == (2_0, 3_2, 3_2, 3) __lowercase= np.array( [ 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, ] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def _A (self ): # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def _A (self ): __lowercase= torch_device == 'cpu' __lowercase= True self._test_inference_batch_single_identical( batch_size=2 , test_max_difference=lowerCAmelCase , relax_max_difference=lowerCAmelCase , ) def _A (self ): __lowercase= self.get_dummy_components() __lowercase= self.pipeline_class(**lowerCAmelCase ) __lowercase= pipe.to(lowerCAmelCase ) pipe.set_progress_bar_config(disable=lowerCAmelCase ) __lowercase= 1 __lowercase= 2 __lowercase= self.get_dummy_inputs(lowerCAmelCase ) for key in inputs.keys(): if key in self.batch_params: __lowercase= batch_size * [inputs[key]] __lowercase= pipe(**lowerCAmelCase , num_images_per_prompt=lowerCAmelCase )[0] assert images.shape[0] == batch_size * num_images_per_prompt @slow @require_torch_gpu class A ( unittest.TestCase ): def _A (self ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def _A (self ): __lowercase= load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/shap_e/corgi.png' ) __lowercase= load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/shap_e/test_shap_e_img2img_out.npy' ) __lowercase= ShapEImgaImgPipeline.from_pretrained('openai/shap-e-img2img' ) __lowercase= pipe.to(lowerCAmelCase ) pipe.set_progress_bar_config(disable=lowerCAmelCase ) __lowercase= torch.Generator(device=lowerCAmelCase ).manual_seed(0 ) __lowercase= pipe( lowerCAmelCase , generator=lowerCAmelCase , guidance_scale=3.0 , num_inference_steps=6_4 , frame_size=6_4 , output_type='np' , ).images[0] assert images.shape == (2_0, 6_4, 6_4, 3) assert_mean_pixel_difference(lowerCAmelCase , lowerCAmelCase )
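The slow test above mirrors typical end-user usage. A hedged sketch of the same call outside the test harness, assuming a CUDA device and the openai/shap-e-img2img checkpoint (class name per the public diffusers API):

import torch
from diffusers import ShapEImg2ImgPipeline
from diffusers.utils import load_image

pipe = ShapEImg2ImgPipeline.from_pretrained("openai/shap-e-img2img").to("cuda")
image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/shap_e/corgi.png"
)
generator = torch.Generator(device="cuda").manual_seed(0)
frames = pipe(
    image,
    generator=generator,
    guidance_scale=3.0,
    num_inference_steps=64,
    frame_size=64,
    output_type="np",
).images[0]
print(frames.shape)  # (20, 64, 64, 3), per the assertion in the test above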
import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST, OpenAIGPTConfig, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification, OpenAIGPTLMHeadModel, OpenAIGPTModel, ) class A : def __init__(self , lowerCAmelCase , lowerCAmelCase=1_3 , lowerCAmelCase=7 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=9_9 , lowerCAmelCase=3_2 , lowerCAmelCase=5 , lowerCAmelCase=4 , lowerCAmelCase=3_7 , lowerCAmelCase="gelu" , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=5_1_2 , lowerCAmelCase=1_6 , lowerCAmelCase=2 , lowerCAmelCase=0.02 , lowerCAmelCase=3 , lowerCAmelCase=4 , lowerCAmelCase=None , ): __lowercase= parent __lowercase= batch_size __lowercase= seq_length __lowercase= is_training __lowercase= use_token_type_ids __lowercase= use_labels __lowercase= vocab_size __lowercase= hidden_size __lowercase= num_hidden_layers __lowercase= num_attention_heads __lowercase= intermediate_size __lowercase= hidden_act __lowercase= hidden_dropout_prob __lowercase= attention_probs_dropout_prob __lowercase= max_position_embeddings __lowercase= type_vocab_size __lowercase= type_sequence_label_size __lowercase= initializer_range __lowercase= num_labels __lowercase= num_choices __lowercase= scope __lowercase= self.vocab_size - 1 def _A (self ): __lowercase= ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowercase= None if self.use_token_type_ids: __lowercase= ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __lowercase= None __lowercase= None __lowercase= None if self.use_labels: __lowercase= ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowercase= ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __lowercase= ids_tensor([self.batch_size] , self.num_choices ) __lowercase= OpenAIGPTConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , ) __lowercase= ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 ) return ( config, input_ids, head_mask, token_type_ids, sequence_labels, token_labels, choice_labels, ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , *lowerCAmelCase ): __lowercase= OpenAIGPTModel(config=lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= model(lowerCAmelCase , token_type_ids=lowerCAmelCase , head_mask=lowerCAmelCase ) __lowercase= model(lowerCAmelCase , token_type_ids=lowerCAmelCase ) __lowercase= model(lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , *lowerCAmelCase ): __lowercase= OpenAIGPTLMHeadModel(lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= model(lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) 
) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , *lowerCAmelCase ): __lowercase= OpenAIGPTDoubleHeadsModel(lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= model(lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , *lowerCAmelCase ): __lowercase= self.num_labels __lowercase= OpenAIGPTForSequenceClassification(lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowercase= model(lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _A (self ): __lowercase= self.prepare_config_and_inputs() ( ( __lowercase ), ( __lowercase ), ( __lowercase ), ( __lowercase ), ( __lowercase ), ( __lowercase ), ( __lowercase ), )= config_and_inputs __lowercase= { 'input_ids': input_ids, 'token_type_ids': token_type_ids, 'head_mask': head_mask, } return config, inputs_dict @require_torch class A ( A_ , A_ , A_ , unittest.TestCase ): UpperCamelCase_ : Optional[Any] =( (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification) if is_torch_available() else () ) UpperCamelCase_ : Tuple =( (OpenAIGPTLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly UpperCamelCase_ : List[str] =( { '''feature-extraction''': OpenAIGPTModel, '''text-classification''': OpenAIGPTForSequenceClassification, '''text-generation''': OpenAIGPTLMHeadModel, '''zero-shot''': OpenAIGPTForSequenceClassification, } if is_torch_available() else {} ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests": # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers. # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a # tiny config could not be created. 
return True return False def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=False ): __lowercase= super()._prepare_for_class(lowerCAmelCase , lowerCAmelCase , return_labels=lowerCAmelCase ) if return_labels: if model_class.__name__ == "OpenAIGPTDoubleHeadsModel": __lowercase= torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=lowerCAmelCase , ) __lowercase= inputs_dict['labels'] __lowercase= inputs_dict['labels'] __lowercase= torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=lowerCAmelCase , ) __lowercase= torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase ) return inputs_dict def _A (self ): __lowercase= OpenAIGPTModelTester(self ) __lowercase= ConfigTester(self , config_class=lowerCAmelCase , n_embd=3_7 ) def _A (self ): self.config_tester.run_common_tests() def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_model(*lowerCAmelCase ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*lowerCAmelCase ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_double_lm_head_model(*lowerCAmelCase ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*lowerCAmelCase ) @slow def _A (self ): for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowercase= OpenAIGPTModel.from_pretrained(lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) @require_torch class A ( unittest.TestCase ): @slow def _A (self ): __lowercase= OpenAIGPTLMHeadModel.from_pretrained('openai-gpt' ) model.to(lowerCAmelCase ) __lowercase= torch.tensor([[4_8_1, 4_7_3_5, 5_4_4]] , dtype=torch.long , device=lowerCAmelCase ) # the president is __lowercase= [ 4_8_1, 4_7_3_5, 5_4_4, 2_4_6, 9_6_3, 8_7_0, 7_6_2, 2_3_9, 2_4_4, 4_0_4_7_7, 2_4_4, 2_4_9, 7_1_9, 8_8_1, 4_8_7, 5_4_4, 2_4_0, 2_4_4, 6_0_3, 4_8_1, ] # the president is a very good man. " \n " i\'m sure he is, " said the __lowercase= model.generate(lowerCAmelCase , do_sample=lowerCAmelCase ) self.assertListEqual(output_ids[0].tolist() , lowerCAmelCase )
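The integration test above checks a fixed greedy continuation of "the president is". The same generation, sketched as a standalone snippet (assumes the openai-gpt checkpoint is downloadable):

import torch
from transformers import OpenAIGPTLMHeadModel, OpenAIGPTTokenizer

tokenizer = OpenAIGPTTokenizer.from_pretrained("openai-gpt")
model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")

input_ids = torch.tensor([[481, 4735, 544]])  # "the president is", as in the test
output_ids = model.generate(input_ids, do_sample=False)  # greedy decoding
print(tokenizer.decode(output_ids[0]))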
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import importlib.metadata
import json
import os
from dataclasses import dataclass
from typing import Any, Dict, Union

from packaging import version

from ..utils import is_torch_available, logging


if is_torch_available():
    import torch

logger = logging.get_logger(__name__)


@dataclass
class BitsAndBytesConfig:
    def __init__(
        self,
        load_in_8bit=False,
        load_in_4bit=False,
        llm_int8_threshold=6.0,
        llm_int8_skip_modules=None,
        llm_int8_enable_fp32_cpu_offload=False,
        llm_int8_has_fp16_weight=False,
        bnb_4bit_compute_dtype=None,
        bnb_4bit_quant_type="fp4",
        bnb_4bit_use_double_quant=False,
        **kwargs,
    ):
        self.load_in_8bit = load_in_8bit
        self.load_in_4bit = load_in_4bit
        self.llm_int8_threshold = llm_int8_threshold
        self.llm_int8_skip_modules = llm_int8_skip_modules
        self.llm_int8_enable_fp32_cpu_offload = llm_int8_enable_fp32_cpu_offload
        self.llm_int8_has_fp16_weight = llm_int8_has_fp16_weight
        self.bnb_4bit_quant_type = bnb_4bit_quant_type
        self.bnb_4bit_use_double_quant = bnb_4bit_use_double_quant

        if bnb_4bit_compute_dtype is None:
            self.bnb_4bit_compute_dtype = torch.float32
        elif isinstance(bnb_4bit_compute_dtype, str):
            self.bnb_4bit_compute_dtype = getattr(torch, bnb_4bit_compute_dtype)
        elif isinstance(bnb_4bit_compute_dtype, torch.dtype):
            self.bnb_4bit_compute_dtype = bnb_4bit_compute_dtype
        else:
            raise ValueError("bnb_4bit_compute_dtype must be a string or a torch.dtype")

        self.post_init()

    def post_init(self):
        if not isinstance(self.llm_int8_threshold, float):
            raise ValueError("llm_int8_threshold must be a float")

        if self.llm_int8_skip_modules is not None and not isinstance(self.llm_int8_skip_modules, list):
            raise ValueError("llm_int8_skip_modules must be a list of strings")

        if not isinstance(self.llm_int8_enable_fp32_cpu_offload, bool):
            raise ValueError("llm_int8_enable_fp32_cpu_offload must be a boolean")

        if not isinstance(self.llm_int8_has_fp16_weight, bool):
            raise ValueError("llm_int8_has_fp16_weight must be a boolean")

        if self.bnb_4bit_compute_dtype is not None and not isinstance(self.bnb_4bit_compute_dtype, torch.dtype):
            raise ValueError("bnb_4bit_compute_dtype must be torch.dtype")

        if not isinstance(self.bnb_4bit_quant_type, str):
            raise ValueError("bnb_4bit_quant_type must be a string")

        if not isinstance(self.bnb_4bit_use_double_quant, bool):
            raise ValueError("bnb_4bit_use_double_quant must be a boolean")

        if self.load_in_4bit and not version.parse(importlib.metadata.version("bitsandbytes")) >= version.parse(
            "0.39.0"
        ):
            raise ValueError(
                "4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version"
            )

    def is_quantizable(self):
        return self.load_in_8bit or self.load_in_4bit

    def quantization_method(self):
        if self.load_in_8bit:
            return "llm_int8"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "fp4":
            return "fp4"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "nf4":
            return "nf4"
        else:
            return None

    @classmethod
    def from_dict(cls, config_dict, return_unused_kwargs, **kwargs):
        config = cls(**config_dict)

        to_remove = []
        for key, value in kwargs.items():
            if hasattr(config, key):
                setattr(config, key, value)
                to_remove.append(key)
        for key in to_remove:
            kwargs.pop(key, None)

        if return_unused_kwargs:
            return config, kwargs
        else:
            return config

    def to_json_file(self, json_file_path):
        with open(json_file_path, "w", encoding="utf-8") as writer:
            config_dict = self.to_dict()
            json_string = json.dumps(config_dict, indent=2, sort_keys=True) + "\n"
            writer.write(json_string)

    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output["bnb_4bit_compute_dtype"] = str(output["bnb_4bit_compute_dtype"]).split(".")[1]
        return output

    def __repr__(self):
        return f"{self.__class__.__name__} {self.to_json_string()}"

    def to_json_string(self, use_diff: bool = True) -> str:
        if use_diff is True:
            config_dict = self.to_diff_dict()
        else:
            config_dict = self.to_dict()
        return json.dumps(config_dict, indent=2, sort_keys=True) + "\n"

    def to_diff_dict(self) -> Dict[str, Any]:
        config_dict = self.to_dict()

        # get the default config dict
        default_config_dict = BitsAndBytesConfig().to_dict()

        serializable_config_dict = {}

        # only serialize values that differ from the default config
        for key, value in config_dict.items():
            if value != default_config_dict[key]:
                serializable_config_dict[key] = value

        return serializable_config_dict
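In practice this config is passed straight to from_pretrained. A minimal sketch; "facebook/opt-350m" is just an example checkpoint, and 4-bit loading assumes bitsandbytes>=0.39.0 as enforced in post_init above:

import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

quantization_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
    bnb_4bit_use_double_quant=True,
)
model = AutoModelForCausalLM.from_pretrained(
    "facebook/opt-350m", quantization_config=quantization_config
)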
from math import isqrt


def is_prime(number: int) -> bool:
    """Trial-division primality test; sufficient for candidates below 10**6."""
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))


def solution(max_prime: int = 10**6) -> int:
    """Count primes below max_prime that are differences of consecutive cubes,
    i.e. of the form (n + 1)**3 - n**3 = 3*n*n + 3*n + 1."""
    primes_count = 0
    cube_index = 1
    prime_candidate = 7  # 2**3 - 1**3
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)
        cube_index += 1
        prime_candidate += 6 * cube_index  # gap between consecutive candidates
    return primes_count


if __name__ == "__main__":
    print(f"{solution() = }")
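The candidate enumeration relies on the identity (n + 1)**3 - n**3 = 3n**2 + 3n + 1; consecutive candidates therefore differ by 6(n + 1), which is exactly the `prime_candidate += 6 * cube_index` step. A quick sanity check:

# First candidates are 7, 19, 37, 61 (n = 1..4); gaps are 12, 18, 24 = 6*(n + 1).
assert [(n + 1) ** 3 - n**3 for n in range(1, 5)] == [7, 19, 37, 61]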
import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, EulerAncestralDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionPanoramaPipeline, UNetaDConditionModel, ) from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() @skip_mps class A ( A_ , A_ , unittest.TestCase ): UpperCamelCase_ : str =StableDiffusionPanoramaPipeline UpperCamelCase_ : str =TEXT_TO_IMAGE_PARAMS UpperCamelCase_ : Any =TEXT_TO_IMAGE_BATCH_PARAMS UpperCamelCase_ : int =TEXT_TO_IMAGE_IMAGE_PARAMS UpperCamelCase_ : Optional[int] =TEXT_TO_IMAGE_IMAGE_PARAMS def _A (self ): torch.manual_seed(0 ) __lowercase= UNetaDConditionModel( block_out_channels=(3_2, 6_4) , layers_per_block=1 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=3_2 , ) __lowercase= DDIMScheduler() torch.manual_seed(0 ) __lowercase= AutoencoderKL( block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , ) torch.manual_seed(0 ) __lowercase= CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , ) __lowercase= CLIPTextModel(lowerCAmelCase ) __lowercase= CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) __lowercase= { 'unet': unet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'safety_checker': None, 'feature_extractor': None, } return components def _A (self , lowerCAmelCase , lowerCAmelCase=0 ): __lowercase= torch.manual_seed(lowerCAmelCase ) __lowercase= { 'prompt': 'a photo of the dolomites', 'generator': generator, # Setting height and width to None to prevent OOMs on CPU. 
'height': None, 'width': None, 'num_inference_steps': 1, 'guidance_scale': 6.0, 'output_type': 'numpy', } return inputs def _A (self ): __lowercase= 'cpu' # ensure determinism for the device-dependent torch.Generator __lowercase= self.get_dummy_components() __lowercase= StableDiffusionPanoramaPipeline(**lowerCAmelCase ) __lowercase= sd_pipe.to(lowerCAmelCase ) sd_pipe.set_progress_bar_config(disable=lowerCAmelCase ) __lowercase= self.get_dummy_inputs(lowerCAmelCase ) __lowercase= sd_pipe(**lowerCAmelCase ).images __lowercase= image[0, -3:, -3:, -1] assert image.shape == (1, 6_4, 6_4, 3) __lowercase= np.array([0.61_86, 0.53_74, 0.49_15, 0.41_35, 0.41_14, 0.45_63, 0.51_28, 0.49_77, 0.47_57] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def _A (self ): super().test_inference_batch_consistent(batch_sizes=[1, 2] ) def _A (self ): super().test_inference_batch_single_identical(batch_size=2 , expected_max_diff=3.25E-3 ) def _A (self ): __lowercase= 'cpu' # ensure determinism for the device-dependent torch.Generator __lowercase= self.get_dummy_components() __lowercase= StableDiffusionPanoramaPipeline(**lowerCAmelCase ) __lowercase= sd_pipe.to(lowerCAmelCase ) sd_pipe.set_progress_bar_config(disable=lowerCAmelCase ) __lowercase= self.get_dummy_inputs(lowerCAmelCase ) __lowercase= 'french fries' __lowercase= sd_pipe(**lowerCAmelCase , negative_prompt=lowerCAmelCase ) __lowercase= output.images __lowercase= image[0, -3:, -3:, -1] assert image.shape == (1, 6_4, 6_4, 3) __lowercase= np.array([0.61_87, 0.53_75, 0.49_15, 0.41_36, 0.41_14, 0.45_63, 0.51_28, 0.49_76, 0.47_57] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def _A (self ): __lowercase= 'cpu' # ensure determinism for the device-dependent torch.Generator __lowercase= self.get_dummy_components() __lowercase= StableDiffusionPanoramaPipeline(**lowerCAmelCase ) __lowercase= sd_pipe.to(lowerCAmelCase ) sd_pipe.set_progress_bar_config(disable=lowerCAmelCase ) __lowercase= self.get_dummy_inputs(lowerCAmelCase ) __lowercase= sd_pipe(**lowerCAmelCase , view_batch_size=2 ) __lowercase= output.images __lowercase= image[0, -3:, -3:, -1] assert image.shape == (1, 6_4, 6_4, 3) __lowercase= np.array([0.61_87, 0.53_75, 0.49_15, 0.41_36, 0.41_14, 0.45_63, 0.51_28, 0.49_76, 0.47_57] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def _A (self ): __lowercase= 'cpu' # ensure determinism for the device-dependent torch.Generator __lowercase= self.get_dummy_components() __lowercase= EulerAncestralDiscreteScheduler( beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' ) __lowercase= StableDiffusionPanoramaPipeline(**lowerCAmelCase ) __lowercase= sd_pipe.to(lowerCAmelCase ) sd_pipe.set_progress_bar_config(disable=lowerCAmelCase ) __lowercase= self.get_dummy_inputs(lowerCAmelCase ) __lowercase= sd_pipe(**lowerCAmelCase ).images __lowercase= image[0, -3:, -3:, -1] assert image.shape == (1, 6_4, 6_4, 3) __lowercase= np.array([0.40_24, 0.65_10, 0.49_01, 0.53_78, 0.58_13, 0.56_22, 0.47_95, 0.44_67, 0.49_52] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def _A (self ): __lowercase= 'cpu' # ensure determinism for the device-dependent torch.Generator __lowercase= self.get_dummy_components() __lowercase= PNDMScheduler( beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' , skip_prk_steps=lowerCAmelCase ) __lowercase= StableDiffusionPanoramaPipeline(**lowerCAmelCase ) __lowercase= sd_pipe.to(lowerCAmelCase ) 
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase ) __lowercase= self.get_dummy_inputs(lowerCAmelCase ) __lowercase= sd_pipe(**lowerCAmelCase ).images __lowercase= image[0, -3:, -3:, -1] assert image.shape == (1, 6_4, 6_4, 3) __lowercase= np.array([0.63_91, 0.62_91, 0.48_61, 0.51_34, 0.55_52, 0.45_78, 0.50_32, 0.50_23, 0.45_39] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 @slow @require_torch_gpu class A ( unittest.TestCase ): def _A (self ): super().tearDown() gc.collect() torch.cuda.empty_cache() def _A (self , lowerCAmelCase=0 ): __lowercase= torch.manual_seed(lowerCAmelCase ) __lowercase= { 'prompt': 'a photo of the dolomites', 'generator': generator, 'num_inference_steps': 3, 'guidance_scale': 7.5, 'output_type': 'numpy', } return inputs def _A (self ): __lowercase= 'stabilityai/stable-diffusion-2-base' __lowercase= DDIMScheduler.from_pretrained(lowerCAmelCase , subfolder='scheduler' ) __lowercase= StableDiffusionPanoramaPipeline.from_pretrained(lowerCAmelCase , scheduler=lowerCAmelCase , safety_checker=lowerCAmelCase ) pipe.to(lowerCAmelCase ) pipe.set_progress_bar_config(disable=lowerCAmelCase ) pipe.enable_attention_slicing() __lowercase= self.get_inputs() __lowercase= pipe(**lowerCAmelCase ).images __lowercase= image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 5_1_2, 2_0_4_8, 3) __lowercase= np.array( [ 0.36_96_83_92, 0.27_02_53_72, 0.32_44_67_66, 0.28_37_93_87, 0.36_36_32_74, 0.30_73_33_47, 0.27_10_00_27, 0.27_05_41_25, 0.25_53_60_96, ] ) assert np.abs(expected_slice - image_slice ).max() < 1E-2 def _A (self ): __lowercase= StableDiffusionPanoramaPipeline.from_pretrained( 'stabilityai/stable-diffusion-2-base' , safety_checker=lowerCAmelCase ) __lowercase= LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.to(lowerCAmelCase ) pipe.set_progress_bar_config(disable=lowerCAmelCase ) pipe.enable_attention_slicing() __lowercase= self.get_inputs() __lowercase= pipe(**lowerCAmelCase ).images __lowercase= image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 5_1_2, 2_0_4_8, 3) __lowercase= np.array( [ [ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ] ] ) assert np.abs(expected_slice - image_slice ).max() < 1E-3 def _A (self ): __lowercase= 0 def callback_fn(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> None: __lowercase= True nonlocal number_of_steps number_of_steps += 1 if step == 1: __lowercase= latents.detach().cpu().numpy() assert latents.shape == (1, 4, 6_4, 2_5_6) __lowercase= latents[0, -3:, -3:, -1] __lowercase= np.array( [ 0.18_68_18_69, 0.33_90_78_16, 0.5_36_12_76, 0.14_43_28_65, -0.02_85_66_11, -0.73_94_11_23, 0.23_39_79_87, 0.47_32_26_82, -0.37_82_31_64, ] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2 elif step == 2: __lowercase= latents.detach().cpu().numpy() assert latents.shape == (1, 4, 6_4, 2_5_6) __lowercase= latents[0, -3:, -3:, -1] __lowercase= np.array( [ 0.18_53_96_45, 0.33_98_72_48, 0.5_37_85_59, 0.14_43_71_42, -0.02_45_52_61, -0.7_33_83_17, 0.23_99_07_55, 0.47_35_62_72, -0.3_78_65_05, ] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2 __lowercase= False __lowercase= 'stabilityai/stable-diffusion-2-base' __lowercase= DDIMScheduler.from_pretrained(lowerCAmelCase , subfolder='scheduler' ) __lowercase= StableDiffusionPanoramaPipeline.from_pretrained(lowerCAmelCase , scheduler=lowerCAmelCase , safety_checker=lowerCAmelCase ) __lowercase= pipe.to(lowerCAmelCase ) pipe.set_progress_bar_config(disable=lowerCAmelCase ) pipe.enable_attention_slicing() __lowercase= 
self.get_inputs() pipe(**lowerCAmelCase , callback=lowerCAmelCase , callback_steps=1 ) assert callback_fn.has_been_called assert number_of_steps == 3 def _A (self ): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() __lowercase= 'stabilityai/stable-diffusion-2-base' __lowercase= DDIMScheduler.from_pretrained(lowerCAmelCase , subfolder='scheduler' ) __lowercase= StableDiffusionPanoramaPipeline.from_pretrained(lowerCAmelCase , scheduler=lowerCAmelCase , safety_checker=lowerCAmelCase ) __lowercase= pipe.to(lowerCAmelCase ) pipe.set_progress_bar_config(disable=lowerCAmelCase ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() __lowercase= self.get_inputs() __lowercase= pipe(**lowerCAmelCase ) __lowercase= torch.cuda.max_memory_allocated() # make sure that less than 5.2 GB is allocated assert mem_bytes < 5.5 * 1_0**9
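Outside the test harness, the slow tests above correspond to the following usage. A hedged sketch, assuming a CUDA device and the stabilityai/stable-diffusion-2-base weights:

import torch
from diffusers import DDIMScheduler, StableDiffusionPanoramaPipeline

model_ckpt = "stabilityai/stable-diffusion-2-base"
scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
pipe = StableDiffusionPanoramaPipeline.from_pretrained(
    model_ckpt, scheduler=scheduler, safety_checker=None
).to("cuda")

generator = torch.Generator(device="cuda").manual_seed(0)
image = pipe(
    "a photo of the dolomites", generator=generator, num_inference_steps=50
).images[0]
# Panorama outputs default to a wide aspect ratio; the tests above check 512x2048.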
from __future__ import annotations


def prime_factors(n: int) -> list[int]:
    """Return the prime factorization of n in ascending order."""
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)  # whatever remains is itself prime
    return factors


if __name__ == "__main__":
    import doctest

    doctest.testmod()
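Example factorizations, with multiplicities appearing as repeated factors:

assert prime_factors(360) == [2, 2, 2, 3, 3, 5]
assert prime_factors(97) == [97]  # primes return themselves
assert prime_factors(1) == []     # 1 has no prime factors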
from __future__ import annotations

from collections.abc import Iterator
from typing import Any


class Node:
    def __init__(self, data: Any) -> None:
        self.data: Any = data
        self.next: Node | None = None


class CircularLinkedList:
    def __init__(self) -> None:
        self.head: Node | None = None
        self.tail: Node | None = None

    def __iter__(self) -> Iterator[Any]:
        node = self.head
        while self.head:
            yield node.data
            node = node.next
            if node == self.head:
                break

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join(str(item) for item in iter(self))

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        if index < 0 or index > len(self):
            raise IndexError("list index out of range.")
        new_node = Node(data)
        if self.head is None:
            new_node.next = new_node  # first node points to itself
            self.tail = self.head = new_node
        elif index == 0:  # insert at head
            new_node.next = self.head
            self.head = self.tail.next = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
            if index == len(self) - 1:  # insert at tail
                self.tail = new_node

    def delete_front(self) -> Any:
        return self.delete_nth(0)

    def delete_tail(self) -> Any:
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index < len(self):
            raise IndexError("list index out of range.")
        delete_node = self.head
        if self.head == self.tail:  # just one node
            self.head = self.tail = None
        elif index == 0:  # delete head node
            self.tail.next = self.tail.next.next
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
            if index == len(self) - 1:  # delete at tail
                self.tail = temp
        return delete_node.data

    def is_empty(self) -> bool:
        return len(self) == 0


def test_circular_linked_list() -> None:
    circular_linked_list = CircularLinkedList()
    assert len(circular_linked_list) == 0
    assert circular_linked_list.is_empty() is True
    assert str(circular_linked_list) == ""

    try:
        circular_linked_list.delete_front()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_tail()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_nth(-1)
        raise AssertionError
    except IndexError:
        assert True

    try:
        circular_linked_list.delete_nth(0)
        raise AssertionError
    except IndexError:
        assert True

    assert circular_linked_list.is_empty() is True
    for i in range(5):
        assert len(circular_linked_list) == i
        circular_linked_list.insert_nth(i, i + 1)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    circular_linked_list.insert_tail(6)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 7))
    circular_linked_list.insert_head(0)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(0, 7))

    assert circular_linked_list.delete_front() == 0
    assert circular_linked_list.delete_tail() == 6
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    assert circular_linked_list.delete_nth(2) == 3

    circular_linked_list.insert_nth(2, 3)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    assert circular_linked_list.is_empty() is False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
import os import re import warnings from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_ta import TaTokenizer else: lowerCAmelCase = None lowerCAmelCase = logging.get_logger(__name__) lowerCAmelCase = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''} lowerCAmelCase = { '''vocab_file''': { '''t5-small''': '''https://huggingface.co/t5-small/resolve/main/spiece.model''', '''t5-base''': '''https://huggingface.co/t5-base/resolve/main/spiece.model''', '''t5-large''': '''https://huggingface.co/t5-large/resolve/main/spiece.model''', '''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/spiece.model''', '''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/spiece.model''', }, '''tokenizer_file''': { '''t5-small''': '''https://huggingface.co/t5-small/resolve/main/tokenizer.json''', '''t5-base''': '''https://huggingface.co/t5-base/resolve/main/tokenizer.json''', '''t5-large''': '''https://huggingface.co/t5-large/resolve/main/tokenizer.json''', '''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/tokenizer.json''', '''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/tokenizer.json''', }, } # TODO(PVP) - this should be removed in Transformers v5 lowerCAmelCase = { '''t5-small''': 5_1_2, '''t5-base''': 5_1_2, '''t5-large''': 5_1_2, '''t5-3b''': 5_1_2, '''t5-11b''': 5_1_2, } class A ( A_ ): UpperCamelCase_ : Dict =VOCAB_FILES_NAMES UpperCamelCase_ : Dict =PRETRAINED_VOCAB_FILES_MAP UpperCamelCase_ : List[Any] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase_ : str =['''input_ids''', '''attention_mask'''] UpperCamelCase_ : List[str] =TaTokenizer UpperCamelCase_ : List[int] =[] def __init__(self , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase="</s>" , lowerCAmelCase="<unk>" , lowerCAmelCase="<pad>" , lowerCAmelCase=1_0_0 , lowerCAmelCase=None , **lowerCAmelCase , ): # Add extra_ids to the special token list if extra_ids > 0 and additional_special_tokens is None: __lowercase= [f'<extra_id_{i}>' for i in range(lowerCAmelCase )] elif extra_ids > 0 and additional_special_tokens is not None: # Check that we have the right number of extra special tokens __lowercase= len(set(filter(lambda lowerCAmelCase : bool('extra_id_' in str(lowerCAmelCase ) ) , lowerCAmelCase ) ) ) if extra_tokens != extra_ids: raise ValueError( f'Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are' ' provided to T5Tokenizer. 
In this case the additional_special_tokens must include the extra_ids' ' tokens' ) super().__init__( lowerCAmelCase , tokenizer_file=lowerCAmelCase , eos_token=lowerCAmelCase , unk_token=lowerCAmelCase , pad_token=lowerCAmelCase , extra_ids=lowerCAmelCase , additional_special_tokens=lowerCAmelCase , **lowerCAmelCase , ) __lowercase= vocab_file __lowercase= False if not self.vocab_file else True __lowercase= extra_ids @staticmethod def _A (lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes: __lowercase= TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path] if init_max_model_length is not None and init_max_model_length != max_model_length: return init_max_model_length elif init_max_model_length is None: warnings.warn( 'This tokenizer was incorrectly instantiated with a model max length of' f' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this' ' behavior is kept to avoid breaking backwards compatibility when padding/encoding with' ' `truncation is True`.\n- Be aware that you SHOULD NOT rely on' f' {pretrained_model_name_or_path} automatically truncating your input to' f' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences' f' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with' ' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please' ' instantiate this tokenizer with `model_max_length` set to your preferred value.' , lowerCAmelCase , ) return max_model_length def _A (self , lowerCAmelCase , lowerCAmelCase = None ): if not self.can_save_slow_tokenizer: raise ValueError( 'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ' 'tokenizer.' ) if not os.path.isdir(lowerCAmelCase ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return __lowercase= os.path.join( lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase ): copyfile(self.vocab_file , lowerCAmelCase ) logger.info(f'Copy vocab file to {out_vocab_file}' ) return (out_vocab_file,) def _A (self , lowerCAmelCase , lowerCAmelCase = None ): __lowercase= token_ids_a + [self.eos_token_id] if token_ids_a is None: return self.prefix_tokens + token_ids_a else: __lowercase= token_ids_a + [self.eos_token_id] return self.prefix_tokens + token_ids_a + token_ids_a def _A (self , lowerCAmelCase , lowerCAmelCase = None ): __lowercase= [self.eos_token_id] if token_ids_a is None: return len(token_ids_a + eos ) * [0] return len(token_ids_a + eos + token_ids_a + eos ) * [0] def _A (self ): return list( set(filter(lambda lowerCAmelCase : bool(re.search(r'<extra_id_\d+>' , lowerCAmelCase ) ) is not None , self.additional_special_tokens ) ) ) def _A (self ): return [self.convert_tokens_to_ids(lowerCAmelCase ) for token in self.get_sentinel_tokens()]
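The sentinel helpers at the end are easiest to see interactively. A hedged sketch assuming the t5-small checkpoint above (note that get_sentinel_tokens builds its result from a set, so no ordering is guaranteed):

from transformers import T5TokenizerFast

tokenizer = T5TokenizerFast.from_pretrained("t5-small")
print(len(tokenizer.get_sentinel_tokens()))  # 100 extra_ids by default

enc = tokenizer("translate English to German: Hello")
assert enc.input_ids[-1] == tokenizer.eos_token_id  # </s> appended automatically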
import argparse import collections import json from pathlib import Path import requests import torch import yaml from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileViTImageProcessor, MobileViTVaConfig, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, ) from transformers.utils import logging logging.set_verbosity_info() lowerCAmelCase = logging.get_logger(__name__) def _lowerCamelCase( lowercase__ ) -> int: '''simple docstring''' print('Loading config file...' ) def flatten_yaml_as_dict(lowercase__ , lowercase__="" , lowercase__="." ): __lowercase= [] for k, v in d.items(): __lowercase= parent_key + sep + k if parent_key else k if isinstance(lowercase__ , collections.abc.MutableMapping ): items.extend(flatten_yaml_as_dict(lowercase__ , lowercase__ , sep=lowercase__ ).items() ) else: items.append((new_key, v) ) return dict(lowercase__ ) __lowercase= argparse.Namespace() with open(lowercase__ , 'r' ) as yaml_file: try: __lowercase= yaml.load(lowercase__ , Loader=yaml.FullLoader ) __lowercase= flatten_yaml_as_dict(lowercase__ ) for k, v in flat_cfg.items(): setattr(lowercase__ , lowercase__ , lowercase__ ) except yaml.YAMLError as exc: logger.error('Error while loading config file: {}. Error message: {}'.format(lowercase__ , str(lowercase__ ) ) ) return config def _lowerCamelCase( lowercase__ , lowercase__ ) -> str: '''simple docstring''' __lowercase= MobileViTVaConfig() __lowercase= False # dataset if task_name.startswith('imagenet1k_' ): __lowercase= 1_0_0_0 if int(task_name.strip().split('_' )[-1] ) == 3_8_4: __lowercase= 3_8_4 else: __lowercase= 2_5_6 __lowercase= 'imagenet-1k-id2label.json' elif task_name.startswith('imagenet21k_to_1k_' ): __lowercase= 2_1_0_0_0 if int(task_name.strip().split('_' )[-1] ) == 3_8_4: __lowercase= 3_8_4 else: __lowercase= 2_5_6 __lowercase= 'imagenet-22k-id2label.json' elif task_name.startswith('ade20k_' ): __lowercase= 1_5_1 __lowercase= 5_1_2 __lowercase= 'ade20k-id2label.json' __lowercase= True elif task_name.startswith('voc_' ): __lowercase= 2_1 __lowercase= 5_1_2 __lowercase= 'pascal-voc-id2label.json' __lowercase= True # orig_config __lowercase= load_orig_config_file(lowercase__ ) assert getattr(lowercase__ , 'model.classification.name' , -1 ) == "mobilevit_v2", "Invalid model" __lowercase= getattr(lowercase__ , 'model.classification.mitv2.width_multiplier' , 1.0 ) assert ( getattr(lowercase__ , 'model.classification.mitv2.attn_norm_layer' , -1 ) == "layer_norm_2d" ), "Norm layers other than layer_norm_2d is not supported" __lowercase= getattr(lowercase__ , 'model.classification.activation.name' , 'swish' ) # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256) if is_segmentation_model: __lowercase= getattr(lowercase__ , 'model.segmentation.output_stride' , 1_6 ) if "_deeplabv3" in task_name: __lowercase= getattr(lowercase__ , 'model.segmentation.deeplabv3.aspp_rates' , [1_2, 2_4, 3_6] ) __lowercase= getattr(lowercase__ , 'model.segmentation.deeplabv3.aspp_out_channels' , 5_1_2 ) __lowercase= getattr(lowercase__ , 'model.segmentation.deeplabv3.aspp_dropout' , 0.1 ) # id2label __lowercase= 'huggingface/label-files' __lowercase= json.load(open(hf_hub_download(lowercase__ , lowercase__ , repo_type='dataset' ) , 'r' ) ) __lowercase= {int(lowercase__ ): v for k, v in idalabel.items()} __lowercase= idalabel __lowercase= {v: k for k, v in idalabel.items()} return config def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ ) -> str: '''simple docstring''' 
__lowercase= dct.pop(lowercase__ ) __lowercase= val def _lowerCamelCase( lowercase__ , lowercase__=False ) -> Optional[Any]: '''simple docstring''' if base_model: __lowercase= '' else: __lowercase= 'mobilevitv2.' __lowercase= [] for k in state_dict.keys(): if k[:8] == "encoder.": __lowercase= k[8:] else: __lowercase= k if ".block." in k: __lowercase= k_new.replace('.block.' , '.' ) if ".conv." in k: __lowercase= k_new.replace('.conv.' , '.convolution.' ) if ".norm." in k: __lowercase= k_new.replace('.norm.' , '.normalization.' ) if "conv_1." in k: __lowercase= k_new.replace('conv_1.' , F'{model_prefix}conv_stem.' ) for i in [1, 2]: if F'layer_{i}.' in k: __lowercase= k_new.replace(F'layer_{i}.' , F'{model_prefix}encoder.layer.{i-1}.layer.' ) if ".exp_1x1." in k: __lowercase= k_new.replace('.exp_1x1.' , '.expand_1x1.' ) if ".red_1x1." in k: __lowercase= k_new.replace('.red_1x1.' , '.reduce_1x1.' ) for i in [3, 4, 5]: if F'layer_{i}.0.' in k: __lowercase= k_new.replace(F'layer_{i}.0.' , F'{model_prefix}encoder.layer.{i-1}.downsampling_layer.' ) if F'layer_{i}.1.local_rep.0.' in k: __lowercase= k_new.replace(F'layer_{i}.1.local_rep.0.' , F'{model_prefix}encoder.layer.{i-1}.conv_kxk.' ) if F'layer_{i}.1.local_rep.1.' in k: __lowercase= k_new.replace(F'layer_{i}.1.local_rep.1.' , F'{model_prefix}encoder.layer.{i-1}.conv_1x1.' ) for i in [3, 4, 5]: if i == 3: __lowercase= [0, 1] elif i == 4: __lowercase= [0, 1, 2, 3] elif i == 5: __lowercase= [0, 1, 2] for j in j_in: if F'layer_{i}.1.global_rep.{j}.' in k: __lowercase= k_new.replace( F'layer_{i}.1.global_rep.{j}.' , F'{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.' ) if F'layer_{i}.1.global_rep.{j+1}.' in k: __lowercase= k_new.replace( F'layer_{i}.1.global_rep.{j+1}.' , F'{model_prefix}encoder.layer.{i-1}.layernorm.' ) if F'layer_{i}.1.conv_proj.' in k: __lowercase= k_new.replace(F'layer_{i}.1.conv_proj.' , F'{model_prefix}encoder.layer.{i-1}.conv_projection.' ) if "pre_norm_attn.0." in k: __lowercase= k_new.replace('pre_norm_attn.0.' , 'layernorm_before.' ) if "pre_norm_attn.1." in k: __lowercase= k_new.replace('pre_norm_attn.1.' , 'attention.' ) if "pre_norm_ffn.0." in k: __lowercase= k_new.replace('pre_norm_ffn.0.' , 'layernorm_after.' ) if "pre_norm_ffn.1." in k: __lowercase= k_new.replace('pre_norm_ffn.1.' , 'ffn.conv1.' ) if "pre_norm_ffn.3." in k: __lowercase= k_new.replace('pre_norm_ffn.3.' , 'ffn.conv2.' ) if "classifier.1." in k: __lowercase= k_new.replace('classifier.1.' , 'classifier.' ) if "seg_head." in k: __lowercase= k_new.replace('seg_head.' , 'segmentation_head.' ) if ".aspp_layer." in k: __lowercase= k_new.replace('.aspp_layer.' , '.' ) if ".aspp_pool." in k: __lowercase= k_new.replace('.aspp_pool.' , '.' ) rename_keys.append((k, k_new) ) return rename_keys def _lowerCamelCase( lowercase__ ) -> Tuple: '''simple docstring''' __lowercase= [] for k in state_dict.keys(): if k.startswith('seg_head.aux_head.' 
): keys_to_ignore.append(lowercase__ ) for k in keys_to_ignore: state_dict.pop(lowercase__ , lowercase__ ) def _lowerCamelCase( ) -> Optional[int]: '''simple docstring''' __lowercase= 'http://images.cocodataset.org/val2017/000000039769.jpg' # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg" __lowercase= Image.open(requests.get(lowercase__ , stream=lowercase__ ).raw ) return im @torch.no_grad() def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ , lowercase__ ) -> Any: '''simple docstring''' __lowercase= get_mobilevitva_config(lowercase__ , lowercase__ ) # load original state_dict __lowercase= torch.load(lowercase__ , map_location='cpu' ) # load huggingface model if task_name.startswith('ade20k_' ) or task_name.startswith('voc_' ): __lowercase= MobileViTVaForSemanticSegmentation(lowercase__ ).eval() __lowercase= False else: __lowercase= MobileViTVaForImageClassification(lowercase__ ).eval() __lowercase= False # remove and rename some keys of load the original model __lowercase= checkpoint remove_unused_keys(lowercase__ ) __lowercase= create_rename_keys(lowercase__ , base_model=lowercase__ ) for rename_key_src, rename_key_dest in rename_keys: rename_key(lowercase__ , lowercase__ , lowercase__ ) # load modified state_dict model.load_state_dict(lowercase__ ) # Check outputs on an image, prepared by MobileViTImageProcessor __lowercase= MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 3_2 ) __lowercase= image_processor(images=prepare_img() , return_tensors='pt' ) __lowercase= model(**lowercase__ ) # verify classification model if task_name.startswith('imagenet' ): __lowercase= outputs.logits __lowercase= logits.argmax(-1 ).item() print('Predicted class:' , model.config.idalabel[predicted_class_idx] ) if task_name.startswith('imagenet1k_256' ) and config.width_multiplier == 1.0: # expected_logits for base variant __lowercase= torch.tensor([-1.6_336E00, -7.3_204E-02, -5.1_883E-01] ) assert torch.allclose(logits[0, :3] , lowercase__ , atol=1E-4 ) Path(lowercase__ ).mkdir(exist_ok=lowercase__ ) print(F'Saving model {task_name} to {pytorch_dump_folder_path}' ) model.save_pretrained(lowercase__ ) print(F'Saving image processor to {pytorch_dump_folder_path}' ) image_processor.save_pretrained(lowercase__ ) if __name__ == "__main__": lowerCAmelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--task''', default='''imagenet1k_256''', type=str, help=( '''Name of the task for which the MobileViTV2 model you\'d like to convert is trained on . 
''' ''' Classification (ImageNet-1k) - MobileViTV2 (256x256) : imagenet1k_256 - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384 - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) : imagenet21k_to_1k_256 - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on ImageNet-1k 384x384) : imagenet21k_to_1k_384 Segmentation - ADE20K Dataset : ade20k_deeplabv3 - Pascal VOC 2012 Dataset: voc_deeplabv3 ''' ), choices=[ '''imagenet1k_256''', '''imagenet1k_384''', '''imagenet21k_to_1k_256''', '''imagenet21k_to_1k_384''', '''ade20k_deeplabv3''', '''voc_deeplabv3''', ], ) parser.add_argument( '''--orig_checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).''' ) parser.add_argument('''--orig_config_path''', required=True, type=str, help='''Path to the original config file.''') parser.add_argument( '''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.''' ) lowerCAmelCase = parser.parse_args() convert_mobilevitva_checkpoint( args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path )
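A hedged sketch of reusing the converted checkpoint for inference, with "./mobilevitv2-converted" standing in for --pytorch_dump_folder_path and class names following the transformers public API:

import requests
from PIL import Image
from transformers import MobileViTImageProcessor, MobileViTV2ForImageClassification

processor = MobileViTImageProcessor.from_pretrained("./mobilevitv2-converted")
model = MobileViTV2ForImageClassification.from_pretrained("./mobilevitv2-converted")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
inputs = processor(images=image, return_tensors="pt")
logits = model(**inputs).logits
print(model.config.id2label[logits.argmax(-1).item()])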
from collections.abc import Sequence


def max_subarray_sum(arr: Sequence[float], allow_empty_subarrays: bool = False) -> float:
    """Kadane's algorithm: maximum sum over contiguous subarrays of arr, in O(n)."""
    if not arr:
        return 0
    max_sum = 0 if allow_empty_subarrays else float("-inf")
    curr_sum = 0.0
    for num in arr:
        # Either extend the running subarray or restart it at the current element
        # (restart at 0, i.e. the empty subarray, when that is allowed).
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)
    return max_sum


if __name__ == "__main__":
    from doctest import testmod

    testmod()

    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
    print(f"{max_subarray_sum(nums) = }")
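A worked trace of the recurrence curr = max(num, curr + num) on the example array makes the algorithm concrete:

# nums     : -2   1  -3   4  -1   2   1  -5   4
# curr_sum : -2   1  -2   4   3   5   6   1   5
# max_sum  : -2   1   1   4   4   5   6   6   6
assert max_subarray_sum([-2, 1, -3, 4, -1, 2, 1, -5, 4]) == 6
assert max_subarray_sum([-3, -1, -2]) == -1  # all-negative input keeps the best single element
assert max_subarray_sum([-3, -1, -2], allow_empty_subarrays=True) == 0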
from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase = logging.get_logger(__name__) lowerCAmelCase = { # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert } class A ( A_ ): UpperCamelCase_ : Tuple ='''megatron-bert''' def __init__(self , lowerCAmelCase=2_9_0_5_6 , lowerCAmelCase=1_0_2_4 , lowerCAmelCase=2_4 , lowerCAmelCase=1_6 , lowerCAmelCase=4_0_9_6 , lowerCAmelCase="gelu" , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=5_1_2 , lowerCAmelCase=2 , lowerCAmelCase=0.02 , lowerCAmelCase=1E-12 , lowerCAmelCase=0 , lowerCAmelCase="absolute" , lowerCAmelCase=True , **lowerCAmelCase , ): super().__init__(pad_token_id=lowerCAmelCase , **lowerCAmelCase ) __lowercase= vocab_size __lowercase= hidden_size __lowercase= num_hidden_layers __lowercase= num_attention_heads __lowercase= hidden_act __lowercase= intermediate_size __lowercase= hidden_dropout_prob __lowercase= attention_probs_dropout_prob __lowercase= max_position_embeddings __lowercase= type_vocab_size __lowercase= initializer_range __lowercase= layer_norm_eps __lowercase= position_embedding_type __lowercase= use_cache
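# Hedged usage sketch (not in the original file): the class above mirrors the
# upstream `MegatronBertConfig`; assuming that import is available, construction
# with a few overridden fields looks like this.
from transformers import MegatronBertConfig

config = MegatronBertConfig(hidden_size=512, num_hidden_layers=8, num_attention_heads=8)
print(config.hidden_size, config.num_hidden_layers)  # 512 8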
import gc import inspect import unittest import torch from parameterized import parameterized from diffusers import PriorTransformer from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin enable_full_determinism() class A ( A_ , unittest.TestCase ): UpperCamelCase_ : Any =PriorTransformer UpperCamelCase_ : List[str] ='''hidden_states''' @property def _A (self ): __lowercase= 4 __lowercase= 8 __lowercase= 7 __lowercase= floats_tensor((batch_size, embedding_dim) ).to(lowerCAmelCase ) __lowercase= floats_tensor((batch_size, embedding_dim) ).to(lowerCAmelCase ) __lowercase= floats_tensor((batch_size, num_embeddings, embedding_dim) ).to(lowerCAmelCase ) return { "hidden_states": hidden_states, "timestep": 2, "proj_embedding": proj_embedding, "encoder_hidden_states": encoder_hidden_states, } def _A (self , lowerCAmelCase=0 ): torch.manual_seed(lowerCAmelCase ) __lowercase= 4 __lowercase= 8 __lowercase= 7 __lowercase= torch.randn((batch_size, embedding_dim) ).to(lowerCAmelCase ) __lowercase= torch.randn((batch_size, embedding_dim) ).to(lowerCAmelCase ) __lowercase= torch.randn((batch_size, num_embeddings, embedding_dim) ).to(lowerCAmelCase ) return { "hidden_states": hidden_states, "timestep": 2, "proj_embedding": proj_embedding, "encoder_hidden_states": encoder_hidden_states, } @property def _A (self ): return (4, 8) @property def _A (self ): return (4, 8) def _A (self ): __lowercase= { 'num_attention_heads': 2, 'attention_head_dim': 4, 'num_layers': 2, 'embedding_dim': 8, 'num_embeddings': 7, 'additional_embeddings': 4, } __lowercase= self.dummy_input return init_dict, inputs_dict def _A (self ): __lowercase, __lowercase= PriorTransformer.from_pretrained( 'hf-internal-testing/prior-dummy' , output_loading_info=lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) self.assertEqual(len(loading_info['missing_keys'] ) , 0 ) model.to(lowerCAmelCase ) __lowercase= model(**self.dummy_input )[0] assert hidden_states is not None, "Make sure output is not None" def _A (self ): __lowercase, __lowercase= self.prepare_init_args_and_inputs_for_common() __lowercase= self.model_class(**lowerCAmelCase ) __lowercase= inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __lowercase= [*signature.parameters.keys()] __lowercase= ['hidden_states', 'timestep'] self.assertListEqual(arg_names[:2] , lowerCAmelCase ) def _A (self ): __lowercase= PriorTransformer.from_pretrained('hf-internal-testing/prior-dummy' ) __lowercase= model.to(lowerCAmelCase ) if hasattr(lowerCAmelCase , 'set_default_attn_processor' ): model.set_default_attn_processor() __lowercase= self.get_dummy_seed_input() with torch.no_grad(): __lowercase= model(**lowerCAmelCase )[0] __lowercase= output[0, :5].flatten().cpu() print(lowerCAmelCase ) # Since the VAE Gaussian prior's generator is seeded on the appropriate device, # the expected output slices are not the same for CPU and GPU. 
__lowercase= torch.tensor([-1.34_36, -0.28_70, 0.75_38, 0.43_68, -0.02_39] ) self.assertTrue(torch_all_close(lowerCAmelCase , lowerCAmelCase , rtol=1E-2 ) ) @slow class A ( unittest.TestCase ): def _A (self , lowerCAmelCase=1 , lowerCAmelCase=7_6_8 , lowerCAmelCase=7_7 , lowerCAmelCase=0 ): torch.manual_seed(lowerCAmelCase ) __lowercase= batch_size __lowercase= embedding_dim __lowercase= num_embeddings __lowercase= torch.randn((batch_size, embedding_dim) ).to(lowerCAmelCase ) __lowercase= torch.randn((batch_size, embedding_dim) ).to(lowerCAmelCase ) __lowercase= torch.randn((batch_size, num_embeddings, embedding_dim) ).to(lowerCAmelCase ) return { "hidden_states": hidden_states, "timestep": 2, "proj_embedding": proj_embedding, "encoder_hidden_states": encoder_hidden_states, } def _A (self ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @parameterized.expand( [ # fmt: off [1_3, [-0.58_61, 0.12_83, -0.09_31, 0.08_82, 0.44_76, 0.13_29, -0.04_98, 0.06_40]], [3_7, [-0.49_13, 0.01_10, -0.04_83, 0.05_41, 0.49_54, -0.01_70, 0.03_54, 0.16_51]], # fmt: on ] ) def _A (self , lowerCAmelCase , lowerCAmelCase ): __lowercase= PriorTransformer.from_pretrained('kandinsky-community/kandinsky-2-1-prior' , subfolder='prior' ) model.to(lowerCAmelCase ) __lowercase= self.get_dummy_seed_input(seed=lowerCAmelCase ) with torch.no_grad(): __lowercase= model(**lowerCAmelCase )[0] assert list(sample.shape ) == [1, 7_6_8] __lowercase= sample[0, :8].flatten().cpu() print(lowerCAmelCase ) __lowercase= torch.tensor(lowerCAmelCase ) assert torch_all_close(lowerCAmelCase , lowerCAmelCase , atol=1E-3 )
import argparse from typing import List import evaluate import numpy as np import torch from datasets import DatasetDict, load_dataset # New Code # # We'll be using StratifiedKFold for this example from sklearn.model_selection import StratifiedKFold from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to perform Cross Validation, # and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To help focus on the differences in the code, building `DataLoaders` # was refactored into its own function. # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## lowerCAmelCase = 1_6 lowerCAmelCase = 3_2 def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = 1_6 ) -> Tuple: '''simple docstring''' __lowercase= AutoTokenizer.from_pretrained('bert-base-cased' ) __lowercase= DatasetDict( { 'train': dataset['train'].select(lowercase__ ), 'validation': dataset['train'].select(lowercase__ ), 'test': dataset['validation'], } ) def tokenize_function(lowercase__ ): # max_length=None => use the model max length (it's actually the default) __lowercase= tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=lowercase__ , max_length=lowercase__ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): __lowercase= datasets.map( lowercase__ , batched=lowercase__ , remove_columns=['idx', 'sentence1', 'sentence2'] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library __lowercase= tokenized_datasets.rename_column('label' , 'labels' ) def collate_fn(lowercase__ ): # On TPU it's best to pad everything to the same length or training will be very slow. __lowercase= 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": __lowercase= 1_6 elif accelerator.mixed_precision != "no": __lowercase= 8 else: __lowercase= None return tokenizer.pad( lowercase__ , padding='longest' , max_length=lowercase__ , pad_to_multiple_of=lowercase__ , return_tensors='pt' , ) # Instantiate dataloaders. 
__lowercase= DataLoader( tokenized_datasets['train'] , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=lowercase__ ) __lowercase= DataLoader( tokenized_datasets['validation'] , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=lowercase__ ) __lowercase= DataLoader( tokenized_datasets['test'] , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=lowercase__ ) return train_dataloader, eval_dataloader, test_dataloader def _lowerCamelCase( lowercase__ , lowercase__ ) -> List[str]: '''simple docstring''' __lowercase= [] # Download the dataset __lowercase= load_dataset('glue' , 'mrpc' ) # Create our splits __lowercase= StratifiedKFold(n_splits=int(args.num_folds ) ) # Initialize accelerator __lowercase= Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs __lowercase= config['lr'] __lowercase= int(config['num_epochs'] ) __lowercase= int(config['seed'] ) __lowercase= int(config['batch_size'] ) __lowercase= evaluate.load('glue' , 'mrpc' ) # If the batch size is too big we use gradient accumulation __lowercase= 1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU: __lowercase= batch_size // MAX_GPU_BATCH_SIZE __lowercase= MAX_GPU_BATCH_SIZE set_seed(lowercase__ ) # New Code # # Create our folds: __lowercase= kfold.split(np.zeros(datasets['train'].num_rows ) , datasets['train']['label'] ) __lowercase= [] # Iterate over them for i, (train_idxs, valid_idxs) in enumerate(lowercase__ ): __lowercase, __lowercase, __lowercase= get_fold_dataloaders( lowercase__ , lowercase__ , lowercase__ , lowercase__ , ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) __lowercase= AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=lowercase__ ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). __lowercase= model.to(accelerator.device ) # Instantiate optimizer __lowercase= AdamW(params=model.parameters() , lr=lowercase__ ) # Instantiate scheduler __lowercase= get_linear_schedule_with_warmup( optimizer=lowercase__ , num_warmup_steps=1_0_0 , num_training_steps=(len(lowercase__ ) * num_epochs) // gradient_accumulation_steps , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. __lowercase, __lowercase, __lowercase, __lowercase, __lowercase= accelerator.prepare( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) # Now we train the model for epoch in range(lowercase__ ): model.train() for step, batch in enumerate(lowercase__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) __lowercase= model(**lowercase__ ) __lowercase= outputs.loss __lowercase= loss / gradient_accumulation_steps accelerator.backward(lowercase__ ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(lowercase__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) with torch.no_grad(): __lowercase= model(**lowercase__ ) __lowercase= outputs.logits.argmax(dim=-1 ) __lowercase, __lowercase= accelerator.gather_for_metrics((predictions, batch['labels']) ) metric.add_batch( predictions=lowercase__ , references=lowercase__ , ) __lowercase= metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F'epoch {epoch}:' , lowercase__ ) # New Code # # We also run predictions on the test set at the very end __lowercase= [] for step, batch in enumerate(lowercase__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): __lowercase= model(**lowercase__ ) __lowercase= outputs.logits __lowercase, __lowercase= accelerator.gather_for_metrics((predictions, batch['labels']) ) fold_predictions.append(predictions.cpu() ) if i == 0: # We need all of the test predictions test_references.append(references.cpu() ) # Use accelerator.print to print only on the main process. test_predictions.append(torch.cat(lowercase__ , dim=0 ) ) # We now need to release all our memory and get rid of the current model, optimizer, etc accelerator.free_memory() # New Code # # Finally we check the accuracy of our folded results: __lowercase= torch.cat(lowercase__ , dim=0 ) __lowercase= torch.stack(lowercase__ , dim=0 ).sum(dim=0 ).div(int(args.num_folds ) ).argmax(dim=-1 ) __lowercase= metric.compute(predictions=lowercase__ , references=lowercase__ ) accelerator.print('Average test metrics from all folds:' , lowercase__ ) def _lowerCamelCase( ) -> Tuple: '''simple docstring''' __lowercase= argparse.ArgumentParser(description='Simple example of training script.' ) parser.add_argument( '--mixed_precision' , type=lowercase__ , default=lowercase__ , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose' 'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.' 'and an Nvidia Ampere GPU.' , ) parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' ) # New Code # parser.add_argument('--num_folds' , type=lowercase__ , default=3 , help='The number of splits to perform across the dataset' ) __lowercase= parser.parse_args() __lowercase= {'lr': 2E-5, 'num_epochs': 3, 'seed': 4_2, 'batch_size': 1_6} training_function(lowercase__ , lowercase__ ) if __name__ == "__main__": main()
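# Hedged sketch (not in the original script) isolating its two key moves:
# StratifiedKFold yields class-balanced index arrays per fold, and the per-fold
# test logits are summed, averaged, and argmax-ed into one ensemble prediction.
# All shapes and values below are toy assumptions.
import numpy as np
import torch
from sklearn.model_selection import StratifiedKFold

labels = np.array([0, 1] * 8)  # 16 toy samples, perfectly balanced classes
for train_idxs, valid_idxs in StratifiedKFold(n_splits=4).split(np.zeros(16), labels):
    print(len(train_idxs), len(valid_idxs))  # 12 4, with both classes in every fold

fold_logits = [torch.randn(16, 2) for _ in range(4)]  # stand-ins for per-fold test logits
ensemble = torch.stack(fold_logits, dim=0).sum(dim=0).div(4).argmax(dim=-1)
print(ensemble.shape)  # torch.Size([16]) -- one averaged prediction per test example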
def _lowerCamelCase( lowercase__ ) -> int: '''simple docstring''' if collection == []: return [] # get some information about the collection __lowercase= len(lowercase__ ) __lowercase= max(lowercase__ ) __lowercase= min(lowercase__ ) # create the counting array __lowercase= coll_max + 1 - coll_min __lowercase= [0] * counting_arr_length # count how much a number appears in the collection for number in collection: counting_arr[number - coll_min] += 1 # sum each position with it's predecessors. now, counting_arr[i] tells # us how many elements <= i has in the collection for i in range(1 , lowercase__ ): __lowercase= counting_arr[i] + counting_arr[i - 1] # create the output collection __lowercase= [0] * coll_len # place the elements in the output, respecting the original order (stable # sort) from end to begin, updating counting_arr for i in reversed(range(0 , lowercase__ ) ): __lowercase= collection[i] counting_arr[collection[i] - coll_min] -= 1 return ordered def _lowerCamelCase( lowercase__ ) -> List[str]: '''simple docstring''' return "".join([chr(lowercase__ ) for i in counting_sort([ord(lowercase__ ) for c in string] )] ) if __name__ == "__main__": # Test string sort assert counting_sort_string('''thisisthestring''') == "eghhiiinrsssttt" lowerCAmelCase = input('''Enter numbers separated by a comma:\n''').strip() lowerCAmelCase = [int(item) for item in user_input.split(''',''')] print(counting_sort(unsorted))
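# Hedged worked trace (not in the original file) of the counting sort above on a
# tiny input; assumes `counting_sort` is in scope under the name the file's demo uses.
# collection = [4, 1, 3, 1]  ->  coll_min = 1, coll_max = 4, counting array of length 4
# raw counts   [2, 0, 1, 1]   (two 1s, no 2s, one 3, one 4)
# prefix sums  [2, 2, 3, 4]   (counting_arr[i] = number of elements <= i + coll_min)
# placing from the end keeps equal keys in input order (stability), giving [1, 1, 3, 4]
print(counting_sort([4, 1, 3, 1]))  # [1, 1, 3, 4]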
import inspect import unittest import numpy as np from transformers import ViTConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor if is_flax_available(): import jax from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel class A ( unittest.TestCase ): def __init__(self , lowerCAmelCase , lowerCAmelCase=1_3 , lowerCAmelCase=3_0 , lowerCAmelCase=2 , lowerCAmelCase=3 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=3_2 , lowerCAmelCase=5 , lowerCAmelCase=4 , lowerCAmelCase=3_7 , lowerCAmelCase="gelu" , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=1_0 , lowerCAmelCase=0.02 , ): __lowercase= parent __lowercase= batch_size __lowercase= image_size __lowercase= patch_size __lowercase= num_channels __lowercase= is_training __lowercase= use_labels __lowercase= hidden_size __lowercase= num_hidden_layers __lowercase= num_attention_heads __lowercase= intermediate_size __lowercase= hidden_act __lowercase= hidden_dropout_prob __lowercase= attention_probs_dropout_prob __lowercase= type_sequence_label_size __lowercase= initializer_range # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) __lowercase= (image_size // patch_size) ** 2 __lowercase= num_patches + 1 def _A (self ): __lowercase= floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __lowercase= ViTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCAmelCase , initializer_range=self.initializer_range , ) return config, pixel_values def _A (self , lowerCAmelCase , lowerCAmelCase ): __lowercase= FlaxViTModel(config=lowerCAmelCase ) __lowercase= model(lowerCAmelCase ) # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) __lowercase= (self.image_size, self.image_size) __lowercase= (self.patch_size, self.patch_size) __lowercase= (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, num_patches + 1, self.hidden_size) ) def _A (self , lowerCAmelCase , lowerCAmelCase ): __lowercase= self.type_sequence_label_size __lowercase= FlaxViTForImageClassification(config=lowerCAmelCase ) __lowercase= model(lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images __lowercase= 1 __lowercase= FlaxViTForImageClassification(lowerCAmelCase ) __lowercase= floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) __lowercase= model(lowerCAmelCase ) def _A (self ): __lowercase= self.prepare_config_and_inputs() ( ( __lowercase ), ( __lowercase ), )= config_and_inputs __lowercase= {'pixel_values': pixel_values} return config, inputs_dict @require_flax class A ( A_ , unittest.TestCase ): UpperCamelCase_ : List[str] =(FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else () def _A (self ): __lowercase= FlaxViTModelTester(self ) __lowercase= ConfigTester(self , config_class=lowerCAmelCase , 
has_text_modality=lowerCAmelCase , hidden_size=3_7 ) def _A (self ): self.config_tester.run_common_tests() def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCAmelCase ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase ) def _A (self ): __lowercase, __lowercase= self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowercase= model_class(lowerCAmelCase ) __lowercase= inspect.signature(model.__call__ ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __lowercase= [*signature.parameters.keys()] __lowercase= ['pixel_values'] self.assertListEqual(arg_names[:1] , lowerCAmelCase ) def _A (self ): __lowercase, __lowercase= self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): __lowercase= self._prepare_for_class(lowerCAmelCase , lowerCAmelCase ) __lowercase= model_class(lowerCAmelCase ) @jax.jit def model_jitted(lowerCAmelCase , **lowerCAmelCase ): return model(pixel_values=lowerCAmelCase , **lowerCAmelCase ) with self.subTest('JIT Enabled' ): __lowercase= model_jitted(**lowerCAmelCase ).to_tuple() with self.subTest('JIT Disabled' ): with jax.disable_jit(): __lowercase= model_jitted(**lowerCAmelCase ).to_tuple() self.assertEqual(len(lowerCAmelCase ) , len(lowerCAmelCase ) ) for jitted_output, output in zip(lowerCAmelCase , lowerCAmelCase ): self.assertEqual(jitted_output.shape , output.shape ) @slow def _A (self ): for model_class_name in self.all_model_classes: __lowercase= model_class_name.from_pretrained('google/vit-base-patch16-224' ) __lowercase= model(np.ones((1, 3, 2_2_4, 2_2_4) ) ) self.assertIsNotNone(lowerCAmelCase )
import os import tempfile import unittest from transformers import DistilBertConfig, is_torch_available from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, DistilBertModel, ) class A ( A_ ): def __init__(self , lowerCAmelCase , lowerCAmelCase=1_3 , lowerCAmelCase=7 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=False , lowerCAmelCase=True , lowerCAmelCase=9_9 , lowerCAmelCase=3_2 , lowerCAmelCase=5 , lowerCAmelCase=4 , lowerCAmelCase=3_7 , lowerCAmelCase="gelu" , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=5_1_2 , lowerCAmelCase=1_6 , lowerCAmelCase=2 , lowerCAmelCase=0.02 , lowerCAmelCase=3 , lowerCAmelCase=4 , lowerCAmelCase=None , ): __lowercase= parent __lowercase= batch_size __lowercase= seq_length __lowercase= is_training __lowercase= use_input_mask __lowercase= use_token_type_ids __lowercase= use_labels __lowercase= vocab_size __lowercase= hidden_size __lowercase= num_hidden_layers __lowercase= num_attention_heads __lowercase= intermediate_size __lowercase= hidden_act __lowercase= hidden_dropout_prob __lowercase= attention_probs_dropout_prob __lowercase= max_position_embeddings __lowercase= type_vocab_size __lowercase= type_sequence_label_size __lowercase= initializer_range __lowercase= num_labels __lowercase= num_choices __lowercase= scope def _A (self ): __lowercase= ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowercase= None if self.use_input_mask: __lowercase= random_attention_mask([self.batch_size, self.seq_length] ) __lowercase= None __lowercase= None __lowercase= None if self.use_labels: __lowercase= ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowercase= ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __lowercase= ids_tensor([self.batch_size] , self.num_choices ) __lowercase= self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def _A (self ): return DistilBertConfig( vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): __lowercase= DistilBertModel(config=lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= model(lowerCAmelCase , lowerCAmelCase ) __lowercase= model(lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): __lowercase= DistilBertForMaskedLM(config=lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= model(lowerCAmelCase , attention_mask=lowerCAmelCase , labels=lowerCAmelCase ) 
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): __lowercase= DistilBertForQuestionAnswering(config=lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= model( lowerCAmelCase , attention_mask=lowerCAmelCase , start_positions=lowerCAmelCase , end_positions=lowerCAmelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): __lowercase= self.num_labels __lowercase= DistilBertForSequenceClassification(lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= model(lowerCAmelCase , attention_mask=lowerCAmelCase , labels=lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): __lowercase= self.num_labels __lowercase= DistilBertForTokenClassification(config=lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= model(lowerCAmelCase , attention_mask=lowerCAmelCase , labels=lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): __lowercase= self.num_choices __lowercase= DistilBertForMultipleChoice(config=lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __lowercase= input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __lowercase= model( lowerCAmelCase , attention_mask=lowerCAmelCase , labels=lowerCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def _A (self ): __lowercase= self.prepare_config_and_inputs() ((__lowercase), (__lowercase), (__lowercase), (__lowercase), (__lowercase), (__lowercase))= config_and_inputs __lowercase= {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class A ( A_ , A_ , unittest.TestCase ): UpperCamelCase_ : Any =( ( DistilBertModel, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, ) if is_torch_available() else None ) UpperCamelCase_ : Optional[int] =( { '''feature-extraction''': DistilBertModel, '''fill-mask''': DistilBertForMaskedLM, '''question-answering''': DistilBertForQuestionAnswering, '''text-classification''': DistilBertForSequenceClassification, '''token-classification''': DistilBertForTokenClassification, '''zero-shot''': DistilBertForSequenceClassification, } if is_torch_available() else {} ) UpperCamelCase_ : str =True UpperCamelCase_ : str =True UpperCamelCase_ : Union[str, Any] =True UpperCamelCase_ : Optional[int] =True def _A (self ): __lowercase= DistilBertModelTester(self ) __lowercase= ConfigTester(self , config_class=lowerCAmelCase , dim=3_7 ) def _A (self ): self.config_tester.run_common_tests() def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_model(*lowerCAmelCase ) def _A (self ): 
__lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_masked_lm(*lowerCAmelCase ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_question_answering(*lowerCAmelCase ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_sequence_classification(*lowerCAmelCase ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_token_classification(*lowerCAmelCase ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_multiple_choice(*lowerCAmelCase ) @slow def _A (self ): for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowercase= DistilBertModel.from_pretrained(lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) @slow @require_torch_gpu def _A (self ): __lowercase, __lowercase= self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # BertForMultipleChoice behaves incorrectly in JIT environments. if model_class == DistilBertForMultipleChoice: return __lowercase= True __lowercase= model_class(config=lowerCAmelCase ) __lowercase= self._prepare_for_class(lowerCAmelCase , lowerCAmelCase ) __lowercase= torch.jit.trace( lowerCAmelCase , (inputs_dict['input_ids'].to('cpu' ), inputs_dict['attention_mask'].to('cpu' )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(lowerCAmelCase , os.path.join(lowerCAmelCase , 'traced_model.pt' ) ) __lowercase= torch.jit.load(os.path.join(lowerCAmelCase , 'traced_model.pt' ) , map_location=lowerCAmelCase ) loaded(inputs_dict['input_ids'].to(lowerCAmelCase ) , inputs_dict['attention_mask'].to(lowerCAmelCase ) ) @require_torch class A ( unittest.TestCase ): @slow def _A (self ): __lowercase= DistilBertModel.from_pretrained('distilbert-base-uncased' ) __lowercase= torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] ) __lowercase= torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): __lowercase= model(lowerCAmelCase , attention_mask=lowerCAmelCase )[0] __lowercase= torch.Size((1, 1_1, 7_6_8) ) self.assertEqual(output.shape , lowerCAmelCase ) __lowercase= torch.tensor( [[[-0.16_39, 0.32_99, 0.16_48], [-0.17_46, 0.32_89, 0.17_10], [-0.18_84, 0.33_57, 0.18_10]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , lowerCAmelCase , atol=1E-4 ) )
import string def _lowerCamelCase( lowercase__ ) -> str: '''simple docstring''' __lowercase= '' for i in sequence: __lowercase= ord(lowercase__ ) if 6_5 <= extract <= 9_0: output += chr(1_5_5 - extract ) elif 9_7 <= extract <= 1_2_2: output += chr(2_1_9 - extract ) else: output += i return output def _lowerCamelCase( lowercase__ ) -> str: '''simple docstring''' __lowercase= string.ascii_letters __lowercase= string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1] return "".join( letters_reversed[letters.index(lowercase__ )] if c in letters else c for c in sequence ) def _lowerCamelCase( ) -> None: '''simple docstring''' from timeit import timeit print('Running performance benchmarks...' ) __lowercase= 'from string import printable ; from __main__ import atbash, atbash_slow' print(F'> atbash_slow(): {timeit("atbash_slow(printable)" , setup=lowercase__ )} seconds' ) print(F'> atbash(): {timeit("atbash(printable)" , setup=lowercase__ )} seconds' ) if __name__ == "__main__": for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"): print(F'{example} encrypted in atbash: {atbash(example)}') benchmark()
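# Hedged check (not in the original file) of the two magic constants above: Atbash
# maps each letter to its mirror in the alphabet, and 155 = ord('A') + ord('Z') while
# 219 = ord('a') + ord('z'), so subtracting from those sums reflects the letter.
assert chr(155 - ord("A")) == "Z" and chr(155 - ord("Z")) == "A"
assert chr(219 - ord("a")) == "z" and chr(219 - ord("m")) == "n"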
def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ , lowercase__ ) -> Union[str, Any]: '''simple docstring''' __lowercase= [False] * len(lowercase__ ) __lowercase= [] queue.append(lowercase__ ) __lowercase= True while queue: __lowercase= queue.pop(0 ) for ind in range(len(graph[u] ) ): if visited[ind] is False and graph[u][ind] > 0: queue.append(lowercase__ ) __lowercase= True __lowercase= u return visited[t] def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ ) -> int: '''simple docstring''' __lowercase= [-1] * (len(lowercase__ )) __lowercase= 0 while bfs(lowercase__ , lowercase__ , lowercase__ , lowercase__ ): __lowercase= float('Inf' ) __lowercase= sink while s != source: # Find the minimum value in select path __lowercase= min(lowercase__ , graph[parent[s]][s] ) __lowercase= parent[s] max_flow += path_flow __lowercase= sink while v != source: __lowercase= parent[v] graph[u][v] -= path_flow graph[v][u] += path_flow __lowercase= parent[v] return max_flow lowerCAmelCase = [ [0, 1_6, 1_3, 0, 0, 0], [0, 0, 1_0, 1_2, 0, 0], [0, 4, 0, 0, 1_4, 0], [0, 0, 9, 0, 0, 2_0], [0, 0, 0, 7, 0, 4], [0, 0, 0, 0, 0, 0], ] lowerCAmelCase ,lowerCAmelCase = 0, 5 print(ford_fulkerson(graph, source, sink))
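# Hedged sanity check (not in the original file): the 6-node graph above is the
# classic example whose maximum flow is 23. A second toy graph shows the flow being
# capped by a single bottleneck edge; assumes `ford_fulkerson` from above is in scope.
bottleneck = [
    [0, 5, 0],
    [0, 0, 3],  # the 1 -> 2 edge with capacity 3 is the minimum cut
    [0, 0, 0],
]
print(ford_fulkerson(bottleneck, 0, 2))  # 3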
from dataclasses import dataclass from typing import Dict, Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, apply_forward_hook from .attention_processor import AttentionProcessor, AttnProcessor from .modeling_utils import ModelMixin from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder @dataclass class A ( A_ ): UpperCamelCase_ : "DiagonalGaussianDistribution" class A ( A_ , A_ ): UpperCamelCase_ : List[str] =True @register_to_config def __init__(self , lowerCAmelCase = 3 , lowerCAmelCase = 3 , lowerCAmelCase = ("DownEncoderBlock2D",) , lowerCAmelCase = ("UpDecoderBlock2D",) , lowerCAmelCase = (6_4,) , lowerCAmelCase = 1 , lowerCAmelCase = "silu" , lowerCAmelCase = 4 , lowerCAmelCase = 3_2 , lowerCAmelCase = 3_2 , lowerCAmelCase = 0.1_82_15 , ): super().__init__() # pass init params to Encoder __lowercase= Encoder( in_channels=lowerCAmelCase , out_channels=lowerCAmelCase , down_block_types=lowerCAmelCase , block_out_channels=lowerCAmelCase , layers_per_block=lowerCAmelCase , act_fn=lowerCAmelCase , norm_num_groups=lowerCAmelCase , double_z=lowerCAmelCase , ) # pass init params to Decoder __lowercase= Decoder( in_channels=lowerCAmelCase , out_channels=lowerCAmelCase , up_block_types=lowerCAmelCase , block_out_channels=lowerCAmelCase , layers_per_block=lowerCAmelCase , norm_num_groups=lowerCAmelCase , act_fn=lowerCAmelCase , ) __lowercase= nn.Convad(2 * latent_channels , 2 * latent_channels , 1 ) __lowercase= nn.Convad(lowerCAmelCase , lowerCAmelCase , 1 ) __lowercase= False __lowercase= False # only relevant if vae tiling is enabled __lowercase= self.config.sample_size __lowercase= ( self.config.sample_size[0] if isinstance(self.config.sample_size , (list, tuple) ) else self.config.sample_size ) __lowercase= int(sample_size / (2 ** (len(self.config.block_out_channels ) - 1)) ) __lowercase= 0.25 def _A (self , lowerCAmelCase , lowerCAmelCase=False ): if isinstance(lowerCAmelCase , (Encoder, Decoder) ): __lowercase= value def _A (self , lowerCAmelCase = True ): __lowercase= use_tiling def _A (self ): self.enable_tiling(lowerCAmelCase ) def _A (self ): __lowercase= True def _A (self ): __lowercase= False @property # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors def _A (self ): __lowercase= {} def fn_recursive_add_processors(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): if hasattr(lowerCAmelCase , 'set_processor' ): __lowercase= module.processor for sub_name, child in module.named_children(): fn_recursive_add_processors(f'{name}.{sub_name}' , lowerCAmelCase , lowerCAmelCase ) return processors for name, module in self.named_children(): fn_recursive_add_processors(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) return processors def _A (self , lowerCAmelCase ): __lowercase= len(self.attn_processors.keys() ) if isinstance(lowerCAmelCase , lowerCAmelCase ) and len(lowerCAmelCase ) != count: raise ValueError( f'A dict of processors was passed, but the number of processors {len(lowerCAmelCase )} does not match the' f' number of attention layers: {count}. Please make sure to pass {count} processor classes.' 
) def fn_recursive_attn_processor(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): if hasattr(lowerCAmelCase , 'set_processor' ): if not isinstance(lowerCAmelCase , lowerCAmelCase ): module.set_processor(lowerCAmelCase ) else: module.set_processor(processor.pop(f'{name}.processor' ) ) for sub_name, child in module.named_children(): fn_recursive_attn_processor(f'{name}.{sub_name}' , lowerCAmelCase , lowerCAmelCase ) for name, module in self.named_children(): fn_recursive_attn_processor(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) def _A (self ): self.set_attn_processor(AttnProcessor() ) @apply_forward_hook def _A (self , lowerCAmelCase , lowerCAmelCase = True ): if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size): return self.tiled_encode(lowerCAmelCase , return_dict=lowerCAmelCase ) if self.use_slicing and x.shape[0] > 1: __lowercase= [self.encoder(lowerCAmelCase ) for x_slice in x.split(1 )] __lowercase= torch.cat(lowerCAmelCase ) else: __lowercase= self.encoder(lowerCAmelCase ) __lowercase= self.quant_conv(lowerCAmelCase ) __lowercase= DiagonalGaussianDistribution(lowerCAmelCase ) if not return_dict: return (posterior,) return AutoencoderKLOutput(latent_dist=lowerCAmelCase ) def _A (self , lowerCAmelCase , lowerCAmelCase = True ): if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size): return self.tiled_decode(lowerCAmelCase , return_dict=lowerCAmelCase ) __lowercase= self.post_quant_conv(lowerCAmelCase ) __lowercase= self.decoder(lowerCAmelCase ) if not return_dict: return (dec,) return DecoderOutput(sample=lowerCAmelCase ) @apply_forward_hook def _A (self , lowerCAmelCase , lowerCAmelCase = True ): if self.use_slicing and z.shape[0] > 1: __lowercase= [self._decode(lowerCAmelCase ).sample for z_slice in z.split(1 )] __lowercase= torch.cat(lowerCAmelCase ) else: __lowercase= self._decode(lowerCAmelCase ).sample if not return_dict: return (decoded,) return DecoderOutput(sample=lowerCAmelCase ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): __lowercase= min(a.shape[2] , b.shape[2] , lowerCAmelCase ) for y in range(lowerCAmelCase ): __lowercase= a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent) return b def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): __lowercase= min(a.shape[3] , b.shape[3] , lowerCAmelCase ) for x in range(lowerCAmelCase ): __lowercase= a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent) return b def _A (self , lowerCAmelCase , lowerCAmelCase = True ): __lowercase= int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) ) __lowercase= int(self.tile_latent_min_size * self.tile_overlap_factor ) __lowercase= self.tile_latent_min_size - blend_extent # Split the image into 512x512 tiles and encode them separately. 
__lowercase= [] for i in range(0 , x.shape[2] , lowerCAmelCase ): __lowercase= [] for j in range(0 , x.shape[3] , lowerCAmelCase ): __lowercase= x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size] __lowercase= self.encoder(lowerCAmelCase ) __lowercase= self.quant_conv(lowerCAmelCase ) row.append(lowerCAmelCase ) rows.append(lowerCAmelCase ) __lowercase= [] for i, row in enumerate(lowerCAmelCase ): __lowercase= [] for j, tile in enumerate(lowerCAmelCase ): # blend the above tile and the left tile # to the current tile and add the current tile to the result row if i > 0: __lowercase= self.blend_v(rows[i - 1][j] , lowerCAmelCase , lowerCAmelCase ) if j > 0: __lowercase= self.blend_h(row[j - 1] , lowerCAmelCase , lowerCAmelCase ) result_row.append(tile[:, :, :row_limit, :row_limit] ) result_rows.append(torch.cat(lowerCAmelCase , dim=3 ) ) __lowercase= torch.cat(lowerCAmelCase , dim=2 ) __lowercase= DiagonalGaussianDistribution(lowerCAmelCase ) if not return_dict: return (posterior,) return AutoencoderKLOutput(latent_dist=lowerCAmelCase ) def _A (self , lowerCAmelCase , lowerCAmelCase = True ): __lowercase= int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) ) __lowercase= int(self.tile_sample_min_size * self.tile_overlap_factor ) __lowercase= self.tile_sample_min_size - blend_extent # Split z into overlapping 64x64 tiles and decode them separately. # The tiles have an overlap to avoid seams between tiles. __lowercase= [] for i in range(0 , z.shape[2] , lowerCAmelCase ): __lowercase= [] for j in range(0 , z.shape[3] , lowerCAmelCase ): __lowercase= z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size] __lowercase= self.post_quant_conv(lowerCAmelCase ) __lowercase= self.decoder(lowerCAmelCase ) row.append(lowerCAmelCase ) rows.append(lowerCAmelCase ) __lowercase= [] for i, row in enumerate(lowerCAmelCase ): __lowercase= [] for j, tile in enumerate(lowerCAmelCase ): # blend the above tile and the left tile # to the current tile and add the current tile to the result row if i > 0: __lowercase= self.blend_v(rows[i - 1][j] , lowerCAmelCase , lowerCAmelCase ) if j > 0: __lowercase= self.blend_h(row[j - 1] , lowerCAmelCase , lowerCAmelCase ) result_row.append(tile[:, :, :row_limit, :row_limit] ) result_rows.append(torch.cat(lowerCAmelCase , dim=3 ) ) __lowercase= torch.cat(lowerCAmelCase , dim=2 ) if not return_dict: return (dec,) return DecoderOutput(sample=lowerCAmelCase ) def _A (self , lowerCAmelCase , lowerCAmelCase = False , lowerCAmelCase = True , lowerCAmelCase = None , ): __lowercase= sample __lowercase= self.encode(lowerCAmelCase ).latent_dist if sample_posterior: __lowercase= posterior.sample(generator=lowerCAmelCase ) else: __lowercase= posterior.mode() __lowercase= self.decode(lowerCAmelCase ).sample if not return_dict: return (dec,) return DecoderOutput(sample=lowerCAmelCase )
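# Hedged sketch (not in the original file) of the cross-fade `blend_h` above applies
# between horizontally adjacent tiles: over `blend_extent` columns the left tile's
# weight falls linearly while the right tile's rises, removing the visible seam.
import torch

a = torch.zeros(1, 1, 1, 8)  # right edge of the left tile (all zeros)
b = torch.ones(1, 1, 1, 8)   # left edge of the right tile (all ones)
blend_extent = 4
for x in range(blend_extent):
    b[:, :, :, x] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
print(b.flatten())  # tensor([0.0000, 0.2500, 0.5000, 0.7500, 1., 1., 1., 1.]) -- a smooth ramp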
from __future__ import annotations def _lowerCamelCase( lowercase__ , lowercase__ ) -> bool: '''simple docstring''' __lowercase= get_failure_array(lowercase__ ) # 2) Step through text searching for pattern __lowercase, __lowercase= 0, 0 # index into text, pattern while i < len(lowercase__ ): if pattern[j] == text[i]: if j == (len(lowercase__ ) - 1): return True j += 1 # if this is a prefix in our pattern # just go back far enough to continue elif j > 0: __lowercase= failure[j - 1] continue i += 1 return False def _lowerCamelCase( lowercase__ ) -> list[int]: '''simple docstring''' __lowercase= [0] __lowercase= 0 __lowercase= 1 while j < len(lowercase__ ): if pattern[i] == pattern[j]: i += 1 elif i > 0: __lowercase= failure[i - 1] continue j += 1 failure.append(lowercase__ ) return failure if __name__ == "__main__": # Test 1) lowerCAmelCase = '''abc1abc12''' lowerCAmelCase = '''alskfjaldsabc1abc1abc12k23adsfabcabc''' lowerCAmelCase = '''alskfjaldsk23adsfabcabc''' assert kmp(pattern, texta) and not kmp(pattern, texta) # Test 2) lowerCAmelCase = '''ABABX''' lowerCAmelCase = '''ABABZABABYABABX''' assert kmp(pattern, text) # Test 3) lowerCAmelCase = '''AAAB''' lowerCAmelCase = '''ABAAAAAB''' assert kmp(pattern, text) # Test 4) lowerCAmelCase = '''abcdabcy''' lowerCAmelCase = '''abcxabcdabxabcdabcdabcy''' assert kmp(pattern, text) # Test 5) lowerCAmelCase = '''aabaabaaa''' assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
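# Hedged worked example (not in the original file) for the failure function above:
# failure[j] is the length of the longest proper prefix of pattern[:j + 1] that is
# also its suffix, so on a mismatch at j the search resumes at failure[j - 1].
# pattern:  A  B  A  B  X
# failure: [0, 0, 1, 2, 0]   ("AB" is both a prefix and a suffix of "ABAB")
assert get_failure_array("ABABX") == [0, 0, 1, 2, 0]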
from math import pi def _lowerCamelCase( lowercase__ , lowercase__ ) -> float: '''simple docstring''' return 2 * pi * radius * (angle / 3_6_0) if __name__ == "__main__": print(arc_length(9_0, 1_0))
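# Hedged worked check (not in the original file): arc length = 2 * pi * r * (angle / 360),
# so the file's own demo (radius 10, angle 90) is a quarter circumference, 5 * pi.
from math import pi

assert abs(2 * pi * 10 * (90 / 360) - 5 * pi) < 1e-12
print(5 * pi)  # 15.707963267948966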
from typing import TYPE_CHECKING from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available from ...utils import OptionalDependencyNotAvailable lowerCAmelCase = {'''configuration_gpt_neox''': ['''GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTNeoXConfig''']} try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase = ['''GPTNeoXTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase = [ '''GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST''', '''GPTNeoXForCausalLM''', '''GPTNeoXForQuestionAnswering''', '''GPTNeoXForSequenceClassification''', '''GPTNeoXForTokenClassification''', '''GPTNeoXLayer''', '''GPTNeoXModel''', '''GPTNeoXPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_gpt_neox import ( GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST, GPTNeoXForCausalLM, GPTNeoXForQuestionAnswering, GPTNeoXForSequenceClassification, GPTNeoXForTokenClassification, GPTNeoXLayer, GPTNeoXModel, GPTNeoXPreTrainedModel, ) else: import sys lowerCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
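# Hedged illustration (not in the original file, and not the real implementation) of
# what the `_LazyModule` indirection above buys: names listed in `_import_structure`
# are only imported on first attribute access, keeping the package import cheap.
import importlib

class LazyDemo:
    def __init__(self, name_to_module):
        self._map = name_to_module  # attribute name -> module that defines it

    def __getattr__(self, name):
        module = importlib.import_module(self._map[name])  # deferred until first access
        return getattr(module, name)

lazy = LazyDemo({"sqrt": "math"})
print(lazy.sqrt(9.0))  # 3.0 -- `math` was imported only on this line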
import math from collections import defaultdict from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput def _lowerCamelCase( lowercase__ , lowercase__=0.999 , lowercase__="cosine" , ) -> List[str]: '''simple docstring''' if alpha_transform_type == "cosine": def alpha_bar_fn(lowercase__ ): return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(lowercase__ ): return math.exp(t * -12.0 ) else: raise ValueError(F'Unsupported alpha_tranform_type: {alpha_transform_type}' ) __lowercase= [] for i in range(lowercase__ ): __lowercase= i / num_diffusion_timesteps __lowercase= (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(lowercase__ ) / alpha_bar_fn(lowercase__ ) , lowercase__ ) ) return torch.tensor(lowercase__ , dtype=torch.floataa ) class A ( A_ , A_ ): UpperCamelCase_ : List[Any] =[e.name for e in KarrasDiffusionSchedulers] UpperCamelCase_ : int =2 @register_to_config def __init__(self , lowerCAmelCase = 1_0_0_0 , lowerCAmelCase = 0.0_00_85 , lowerCAmelCase = 0.0_12 , lowerCAmelCase = "linear" , lowerCAmelCase = None , lowerCAmelCase = "epsilon" , lowerCAmelCase = "linspace" , lowerCAmelCase = 0 , ): if trained_betas is not None: __lowercase= torch.tensor(lowerCAmelCase , dtype=torch.floataa ) elif beta_schedule == "linear": __lowercase= torch.linspace(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , dtype=torch.floataa ) elif beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. __lowercase= ( torch.linspace(beta_start**0.5 , beta_end**0.5 , lowerCAmelCase , dtype=torch.floataa ) ** 2 ) elif beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule __lowercase= betas_for_alpha_bar(lowerCAmelCase ) else: raise NotImplementedError(f'{beta_schedule} does is not implemented for {self.__class__}' ) __lowercase= 1.0 - self.betas __lowercase= torch.cumprod(self.alphas , dim=0 ) # set all values self.set_timesteps(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) def _A (self , lowerCAmelCase , lowerCAmelCase=None ): if schedule_timesteps is None: __lowercase= self.timesteps __lowercase= (schedule_timesteps == timestep).nonzero() # The sigma index that is taken for the **very** first `step` # is always the second index (or the last index if there is only 1) # This way we can ensure we don't accidentally skip a sigma in # case we start in the middle of the denoising schedule (e.g. 
for image-to-image) if len(self._index_counter ) == 0: __lowercase= 1 if len(lowerCAmelCase ) > 1 else 0 else: __lowercase= timestep.cpu().item() if torch.is_tensor(lowerCAmelCase ) else timestep __lowercase= self._index_counter[timestep_int] return indices[pos].item() @property def _A (self ): # standard deviation of the initial noise distribution if self.config.timestep_spacing in ["linspace", "trailing"]: return self.sigmas.max() return (self.sigmas.max() ** 2 + 1) ** 0.5 def _A (self , lowerCAmelCase , lowerCAmelCase , ): __lowercase= self.index_for_timestep(lowerCAmelCase ) if self.state_in_first_order: __lowercase= self.sigmas[step_index] else: __lowercase= self.sigmas_interpol[step_index] __lowercase= sample / ((sigma**2 + 1) ** 0.5) return sample def _A (self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = None , ): __lowercase= num_inference_steps __lowercase= num_train_timesteps or self.config.num_train_timesteps # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891 if self.config.timestep_spacing == "linspace": __lowercase= np.linspace(0 , num_train_timesteps - 1 , lowerCAmelCase , dtype=lowerCAmelCase )[::-1].copy() elif self.config.timestep_spacing == "leading": __lowercase= num_train_timesteps // self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 __lowercase= (np.arange(0 , lowerCAmelCase ) * step_ratio).round()[::-1].copy().astype(lowerCAmelCase ) timesteps += self.config.steps_offset elif self.config.timestep_spacing == "trailing": __lowercase= num_train_timesteps / self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 __lowercase= (np.arange(lowerCAmelCase , 0 , -step_ratio )).round().copy().astype(lowerCAmelCase ) timesteps -= 1 else: raise ValueError( f'{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.' 
) __lowercase= np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 ) __lowercase= torch.from_numpy(np.log(lowerCAmelCase ) ).to(lowerCAmelCase ) __lowercase= np.interp(lowerCAmelCase , np.arange(0 , len(lowerCAmelCase ) ) , lowerCAmelCase ) __lowercase= np.concatenate([sigmas, [0.0]] ).astype(np.floataa ) __lowercase= torch.from_numpy(lowerCAmelCase ).to(device=lowerCAmelCase ) # interpolate sigmas __lowercase= sigmas.log().lerp(sigmas.roll(1 ).log() , 0.5 ).exp() __lowercase= torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] ) __lowercase= torch.cat( [sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] ) if str(lowerCAmelCase ).startswith('mps' ): # mps does not support float64 __lowercase= torch.from_numpy(lowerCAmelCase ).to(lowerCAmelCase , dtype=torch.floataa ) else: __lowercase= torch.from_numpy(lowerCAmelCase ).to(lowerCAmelCase ) # interpolate timesteps __lowercase= self.sigma_to_t(lowerCAmelCase ).to(lowerCAmelCase , dtype=timesteps.dtype ) __lowercase= torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) , dim=-1 ).flatten() __lowercase= torch.cat([timesteps[:1], interleaved_timesteps] ) __lowercase= None # for exp beta schedules, such as the one for `pipeline_shap_e.py` # we need an index counter __lowercase= defaultdict(lowerCAmelCase ) def _A (self , lowerCAmelCase ): # get log sigma __lowercase= sigma.log() # get distribution __lowercase= log_sigma - self.log_sigmas[:, None] # get sigmas range __lowercase= dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 ) __lowercase= low_idx + 1 __lowercase= self.log_sigmas[low_idx] __lowercase= self.log_sigmas[high_idx] # interpolate sigmas __lowercase= (low - log_sigma) / (low - high) __lowercase= w.clamp(0 , 1 ) # transform interpolation to time range __lowercase= (1 - w) * low_idx + w * high_idx __lowercase= t.view(sigma.shape ) return t @property def _A (self ): return self.sample is None def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = True , ): __lowercase= self.index_for_timestep(lowerCAmelCase ) # advance index counter by 1 __lowercase= timestep.cpu().item() if torch.is_tensor(lowerCAmelCase ) else timestep self._index_counter[timestep_int] += 1 if self.state_in_first_order: __lowercase= self.sigmas[step_index] __lowercase= self.sigmas_interpol[step_index + 1] __lowercase= self.sigmas[step_index + 1] else: # 2nd order / KDPM2's method __lowercase= self.sigmas[step_index - 1] __lowercase= self.sigmas_interpol[step_index] __lowercase= self.sigmas[step_index] # currently only gamma=0 is supported. This usually works best anyways. # We can support gamma in the future but then need to scale the timestep before # passing it to the model which requires a change in API __lowercase= 0 __lowercase= sigma * (gamma + 1) # Note: sigma_hat == sigma for now # 1. 
compute predicted original sample (x_0) from sigma-scaled predicted noise if self.config.prediction_type == "epsilon": __lowercase= sigma_hat if self.state_in_first_order else sigma_interpol __lowercase= sample - sigma_input * model_output elif self.config.prediction_type == "v_prediction": __lowercase= sigma_hat if self.state_in_first_order else sigma_interpol __lowercase= model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + ( sample / (sigma_input**2 + 1) ) elif self.config.prediction_type == "sample": raise NotImplementedError('prediction_type not implemented yet: sample' ) else: raise ValueError( f'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`' ) if self.state_in_first_order: # 2. Convert to an ODE derivative for 1st order __lowercase= (sample - pred_original_sample) / sigma_hat # 3. delta timestep __lowercase= sigma_interpol - sigma_hat # store for 2nd order step __lowercase= sample else: # DPM-Solver-2 # 2. Convert to an ODE derivative for 2nd order __lowercase= (sample - pred_original_sample) / sigma_interpol # 3. delta timestep __lowercase= sigma_next - sigma_hat __lowercase= self.sample __lowercase= None __lowercase= sample + derivative * dt if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=lowerCAmelCase ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ): # Make sure sigmas and timesteps have the same device and dtype as original_samples __lowercase= self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype ) if original_samples.device.type == "mps" and torch.is_floating_point(lowerCAmelCase ): # mps does not support float64 __lowercase= self.timesteps.to(original_samples.device , dtype=torch.floataa ) __lowercase= timesteps.to(original_samples.device , dtype=torch.floataa ) else: __lowercase= self.timesteps.to(original_samples.device ) __lowercase= timesteps.to(original_samples.device ) __lowercase= [self.index_for_timestep(lowerCAmelCase , lowerCAmelCase ) for t in timesteps] __lowercase= sigmas[step_indices].flatten() while len(sigma.shape ) < len(original_samples.shape ): __lowercase= sigma.unsqueeze(-1 ) __lowercase= original_samples + noise * sigma return noisy_samples def __len__(self ): return self.config.num_train_timesteps
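# Hedged numeric check (not in the original file) of the `squaredcos_cap_v2` branch
# above: `betas_for_alpha_bar` converts the cosine alpha_bar curve into per-step
# betas via beta_i = 1 - alpha_bar(t_{i+1}) / alpha_bar(t_i), clipped at max_beta.
import math

def alpha_bar(t):
    return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

num_steps, max_beta = 10, 0.999
betas = [
    min(1 - alpha_bar((i + 1) / num_steps) / alpha_bar(i / num_steps), max_beta)
    for i in range(num_steps)
]
print([round(b, 4) for b in betas])  # small near t=0, rising toward max_beta near t=1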
import enum import warnings from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING from ..utils import add_end_docstrings, is_tf_available from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf class A ( enum.Enum ): UpperCamelCase_ : Optional[int] =0 UpperCamelCase_ : Tuple =1 UpperCamelCase_ : Optional[int] =2 @add_end_docstrings(A_ ) class A ( A_ ): UpperCamelCase_ : Union[str, Any] =''' In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision and denounces one of the men as a horse thief. Although his father initially slaps him for making such an accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop, begging for his blessing. <eod> </s> <eos> ''' def __init__(self , *lowerCAmelCase , **lowerCAmelCase ): super().__init__(*lowerCAmelCase , **lowerCAmelCase ) self.check_model_type( TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == 'tf' else MODEL_FOR_CAUSAL_LM_MAPPING ) if "prefix" not in self._preprocess_params: # This is very specific. The logic is quite complex and needs to be done # as a "default". # It also defines both some preprocess_kwargs and generate_kwargs # which is why we cannot put them in their respective methods. __lowercase= None if self.model.config.prefix is not None: __lowercase= self.model.config.prefix if prefix is None and self.model.__class__.__name__ in [ "XLNetLMHeadModel", "TransfoXLLMHeadModel", "TFXLNetLMHeadModel", "TFTransfoXLLMHeadModel", ]: # For XLNet and TransformerXL we add an article to the prompt to give more state to the model. __lowercase= self.XL_PREFIX if prefix is not None: # Recalculate some generate_kwargs linked to prefix. 
__lowercase, __lowercase, __lowercase= self._sanitize_parameters(prefix=lowerCAmelCase , **self._forward_params ) __lowercase= {**self._preprocess_params, **preprocess_params} __lowercase= {**self._forward_params, **forward_params} def _A (self , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , **lowerCAmelCase , ): __lowercase= {} if prefix is not None: __lowercase= prefix if prefix: __lowercase= self.tokenizer( lowerCAmelCase , padding=lowerCAmelCase , add_special_tokens=lowerCAmelCase , return_tensors=self.framework ) __lowercase= prefix_inputs['input_ids'].shape[-1] if handle_long_generation is not None: if handle_long_generation not in {"hole"}: raise ValueError( f'{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected' ' [None, \'hole\']' ) __lowercase= handle_long_generation preprocess_params.update(lowerCAmelCase ) __lowercase= generate_kwargs __lowercase= {} if return_full_text is not None and return_type is None: if return_text is not None: raise ValueError('`return_text` is mutually exclusive with `return_full_text`' ) if return_tensors is not None: raise ValueError('`return_full_text` is mutually exclusive with `return_tensors`' ) __lowercase= ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT if return_tensors is not None and return_type is None: if return_text is not None: raise ValueError('`return_text` is mutually exclusive with `return_tensors`' ) __lowercase= ReturnType.TENSORS if return_type is not None: __lowercase= return_type if clean_up_tokenization_spaces is not None: __lowercase= clean_up_tokenization_spaces if stop_sequence is not None: __lowercase= self.tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase ) if len(lowerCAmelCase ) > 1: warnings.warn( 'Stopping on a multiple token sequence is not yet supported on transformers. The first token of' ' the stop sequence will be used as the stop sequence string in the interim.' 
) __lowercase= stop_sequence_ids[0] return preprocess_params, forward_params, postprocess_params def _A (self , *lowerCAmelCase , **lowerCAmelCase ): # Parse arguments if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]: kwargs.update({'add_space_before_punct_symbol': True} ) return super()._parse_and_tokenize(*lowerCAmelCase , **lowerCAmelCase ) def __call__(self , lowerCAmelCase , **lowerCAmelCase ): return super().__call__(lowerCAmelCase , **lowerCAmelCase ) def _A (self , lowerCAmelCase , lowerCAmelCase="" , lowerCAmelCase=None , **lowerCAmelCase ): __lowercase= self.tokenizer( prefix + prompt_text , padding=lowerCAmelCase , add_special_tokens=lowerCAmelCase , return_tensors=self.framework ) __lowercase= prompt_text if handle_long_generation == "hole": __lowercase= inputs['input_ids'].shape[-1] if "max_new_tokens" in generate_kwargs: __lowercase= generate_kwargs['max_new_tokens'] else: __lowercase= generate_kwargs.get('max_length' , self.model.config.max_length ) - cur_len if new_tokens < 0: raise ValueError('We cannot infer how many new tokens are expected' ) if cur_len + new_tokens > self.tokenizer.model_max_length: __lowercase= self.tokenizer.model_max_length - new_tokens if keep_length <= 0: raise ValueError( 'We cannot use `hole` to handle this generation the number of desired tokens exceeds the' ' models max length' ) __lowercase= inputs['input_ids'][:, -keep_length:] if "attention_mask" in inputs: __lowercase= inputs['attention_mask'][:, -keep_length:] return inputs def _A (self , lowerCAmelCase , **lowerCAmelCase ): __lowercase= model_inputs['input_ids'] __lowercase= model_inputs.get('attention_mask' , lowerCAmelCase ) # Allow empty prompts if input_ids.shape[1] == 0: __lowercase= None __lowercase= None __lowercase= 1 else: __lowercase= input_ids.shape[0] __lowercase= model_inputs.pop('prompt_text' ) # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline. 
__lowercase= generate_kwargs.pop('prefix_length' , 0 ) if prefix_length > 0: __lowercase= 'max_new_tokens' in generate_kwargs or ( 'generation_config' in generate_kwargs and generate_kwargs['generation_config'].max_new_tokens is not None ) if not has_max_new_tokens: __lowercase= generate_kwargs.get('max_length' ) or self.model.config.max_length generate_kwargs["max_length"] += prefix_length __lowercase= 'min_new_tokens' in generate_kwargs or ( 'generation_config' in generate_kwargs and generate_kwargs['generation_config'].min_new_tokens is not None ) if not has_min_new_tokens and "min_length" in generate_kwargs: generate_kwargs["min_length"] += prefix_length # BS x SL __lowercase= self.model.generate(input_ids=lowerCAmelCase , attention_mask=lowerCAmelCase , **lowerCAmelCase ) __lowercase= generated_sequence.shape[0] if self.framework == "pt": __lowercase= generated_sequence.reshape(lowerCAmelCase , out_b // in_b , *generated_sequence.shape[1:] ) elif self.framework == "tf": __lowercase= tf.reshape(lowerCAmelCase , (in_b, out_b // in_b, *generated_sequence.shape[1:]) ) return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text} def _A (self , lowerCAmelCase , lowerCAmelCase=ReturnType.FULL_TEXT , lowerCAmelCase=True ): __lowercase= model_outputs['generated_sequence'][0] __lowercase= model_outputs['input_ids'] __lowercase= model_outputs['prompt_text'] __lowercase= generated_sequence.numpy().tolist() __lowercase= [] for sequence in generated_sequence: if return_type == ReturnType.TENSORS: __lowercase= {'generated_token_ids': sequence} elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}: # Decode text __lowercase= self.tokenizer.decode( lowerCAmelCase , skip_special_tokens=lowerCAmelCase , clean_up_tokenization_spaces=lowerCAmelCase , ) # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used if input_ids is None: __lowercase= 0 else: __lowercase= len( self.tokenizer.decode( input_ids[0] , skip_special_tokens=lowerCAmelCase , clean_up_tokenization_spaces=lowerCAmelCase , ) ) if return_type == ReturnType.FULL_TEXT: __lowercase= prompt_text + text[prompt_length:] else: __lowercase= text[prompt_length:] __lowercase= {'generated_text': all_text} records.append(lowerCAmelCase ) return records
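# --- Usage sketch (not part of the original file). The class above backs ---
# --- `pipeline("text-generation")`; the model name and prompt below are ---
# --- placeholders, any causal LM checkpoint would work. ---
from transformers import pipeline

generator = pipeline("text-generation", model="gpt2")
outputs = generator("Hello, I'm a language model,", max_new_tokens=20)
print(outputs[0]["generated_text"])  # ReturnType.FULL_TEXT is the default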
def prefix_function(input_string: str) -> list:
    """Computes the KMP prefix function: prefix_result[i] is the length of the
    longest proper prefix of input_string[: i + 1] that is also a suffix of it."""
    prefix_result = [0] * len(input_string)
    for i in range(1, len(input_string)):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]
        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j
    return prefix_result


def longest_prefix(input_str: str) -> int:
    """Returns the length of the longest border (prefix that is also a suffix)."""
    return max(prefix_function(input_str))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
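# --- Worked example (not part of the original file), computed by hand to ---
# --- match the implementation above. ---
assert prefix_function("aabaaab") == [0, 1, 0, 1, 2, 2, 3]
assert longest_prefix("aabaaab") == 3  # "aab" is both a proper prefix and a suffix
assert longest_prefix("abcd") == 0     # no border at all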
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch
import torch.nn as nn

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer


@dataclass
class VQEncoderOutput(BaseOutput):
    latents: torch.FloatTensor


class VQModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 3,
        sample_size: int = 32,
        num_vq_embeddings: int = 256,
        norm_num_groups: int = 32,
        vq_embed_dim: Optional[int] = None,
        scaling_factor: float = 0.18215,
        norm_type: str = "group",
    ):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=False,
        )

        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels

        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            norm_type=norm_type,
        )

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> VQEncoderOutput:
        h = self.encoder(x)
        h = self.quant_conv(h)

        if not return_dict:
            return (h,)

        return VQEncoderOutput(latents=h)

    @apply_forward_hook
    def decode(
        self, h: torch.FloatTensor, force_not_quantize: bool = False, return_dict: bool = True
    ) -> Union[DecoderOutput, torch.FloatTensor]:
        # also go through quantization layer
        if not force_not_quantize:
            quant, emb_loss, info = self.quantize(h)
        else:
            quant = h
        quant2 = self.post_quant_conv(quant)
        dec = self.decoder(quant2, quant if self.config.norm_type == "spatial" else None)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    def forward(self, sample: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        x = sample
        h = self.encode(x).latents
        dec = self.decode(h).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
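# --- Minimal sketch (not part of the original file) of what a vector ---
# --- quantizer does: snap each latent vector to its nearest codebook entry. ---
# --- This is a simplification of the VectorQuantizer used above; sizes are toy. ---
import torch

codebook = torch.randn(16, 4)   # 16 embeddings of dim 4
latents = torch.randn(5, 4)     # 5 latent vectors to quantize

# pairwise distances (5, 16), then pick the closest code per latent
dists = torch.cdist(latents, codebook)
indices = dists.argmin(dim=1)   # (5,) codebook indices
quantized = codebook[indices]   # (5, 4) quantized latents

# the straight-through estimator lets gradients flow back to `latents` in training
quantized_st = latents + (quantized - latents).detach()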
from argparse import ArgumentParser from .add_new_model import AddNewModelCommand from .add_new_model_like import AddNewModelLikeCommand from .convert import ConvertCommand from .download import DownloadCommand from .env import EnvironmentCommand from .lfs import LfsCommands from .pt_to_tf import PTtoTFCommand from .run import RunCommand from .serving import ServeCommand from .user import UserCommands def _lowerCamelCase( ) -> Tuple: '''simple docstring''' __lowercase= ArgumentParser('Transformers CLI tool' , usage='transformers-cli <command> [<args>]' ) __lowercase= parser.add_subparsers(help='transformers-cli command helpers' ) # Register commands ConvertCommand.register_subcommand(lowercase__ ) DownloadCommand.register_subcommand(lowercase__ ) EnvironmentCommand.register_subcommand(lowercase__ ) RunCommand.register_subcommand(lowercase__ ) ServeCommand.register_subcommand(lowercase__ ) UserCommands.register_subcommand(lowercase__ ) AddNewModelCommand.register_subcommand(lowercase__ ) AddNewModelLikeCommand.register_subcommand(lowercase__ ) LfsCommands.register_subcommand(lowercase__ ) PTtoTFCommand.register_subcommand(lowercase__ ) # Let's go __lowercase= parser.parse_args() if not hasattr(lowercase__ , 'func' ): parser.print_help() exit(1 ) # Run __lowercase= args.func(lowercase__ ) service.run() if __name__ == "__main__": main()
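# --- Minimal sketch (not part of the original file) of the subcommand ---
# --- pattern used above: each command registers itself on the subparsers ---
# --- and stores a callable on `args.func`. Names here are illustrative. ---
import argparse

def register_hello(subparsers):
    sub = subparsers.add_parser("hello", help="toy command")
    sub.set_defaults(func=lambda args: print("hello from the CLI"))

parser = argparse.ArgumentParser("toy-cli", usage="toy-cli <command> [<args>]")
subparsers = parser.add_subparsers(help="toy-cli command helpers")
register_hello(subparsers)

args = parser.parse_args(["hello"])
if not hasattr(args, "func"):
    parser.print_help()
else:
    args.func(args)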
import os import numpy import onnx def _lowerCamelCase( lowercase__ , lowercase__ ) -> Union[str, Any]: '''simple docstring''' __lowercase= a.name __lowercase= b.name __lowercase= '' __lowercase= '' __lowercase= a == b __lowercase= name_a __lowercase= name_b return res def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ ) -> Union[str, Any]: '''simple docstring''' for i, input_name in enumerate(node_proto.input ): if input_name == name: node_proto.input.insert(lowercase__ , lowercase__ ) node_proto.input.pop(i + 1 ) if node_proto.op_type == "If": _graph_replace_input_with(node_proto.attribute[0].g , lowercase__ , lowercase__ ) _graph_replace_input_with(node_proto.attribute[1].g , lowercase__ , lowercase__ ) if node_proto.op_type == "Loop": _graph_replace_input_with(node_proto.attribute[0].g , lowercase__ , lowercase__ ) def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ ) -> str: '''simple docstring''' for n in graph_proto.node: _node_replace_input_with(lowercase__ , lowercase__ , lowercase__ ) def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ ) -> Any: '''simple docstring''' __lowercase= list(model.graph.initializer ) __lowercase= list(model_without_ext.graph.initializer ) for i, ref_i in ind_to_replace: assert inits_with_data[i].name == inits[i].name assert inits_with_data[ref_i].name == inits[ref_i].name assert i > ref_i __lowercase= inits[i].name __lowercase= inits[ref_i].name model_without_ext.graph.initializer.remove(inits[i] ) # for n in model.graph.node: _graph_replace_input_with(model_without_ext.graph , lowercase__ , lowercase__ ) def _lowerCamelCase( lowercase__ ) -> Dict: '''simple docstring''' __lowercase= os.path.dirname(lowercase__ ) __lowercase= os.path.basename(lowercase__ ) __lowercase= onnx.load(os.path.join(lowercase__ , lowercase__ ) ) __lowercase= list(model.graph.initializer ) __lowercase= set() __lowercase= {} __lowercase= [] __lowercase= 0 for i in range(len(lowercase__ ) ): if i in dup_set: continue for j in range(i + 1 , len(lowercase__ ) ): if j in dup_set: continue if _is_equal_tensor_proto(inits[i] , inits[j] ): dup_set.add(lowercase__ ) dup_set.add(lowercase__ ) __lowercase= inits[j].data_type __lowercase= numpy.prod(inits[j].dims ) if dtype == 1: mem_size *= 4 elif dtype == 6: mem_size *= 4 elif dtype == 7 or dtype == 1_1: mem_size *= 8 else: print('unexpected data type: ' , lowercase__ ) total_reduced_size += mem_size __lowercase= inits[i].name __lowercase= inits[j].name if name_i in dup_map: dup_map[name_i].append(lowercase__ ) else: __lowercase= [name_j] ind_to_replace.append((j, i) ) print('total reduced size: ' , total_reduced_size / 1_0_2_4 / 1_0_2_4 / 1_0_2_4 , 'GB' ) __lowercase= sorted(lowercase__ ) _remove_dup_initializers_from_model(lowercase__ , lowercase__ , lowercase__ ) __lowercase= 'optimized_' + model_file_name __lowercase= os.path.join(lowercase__ , lowercase__ ) onnx.save(lowercase__ , lowercase__ ) return new_model
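# --- Self-contained sketch (not part of the original file) of the dedup ---
# --- idea above: find initializers with identical data and keep only one, ---
# --- rewiring every consumer of the duplicate to the kept tensor. ---
import numpy as np

inits = {"a": np.ones(4), "b": np.zeros(4), "c": np.ones(4)}  # toy "initializers"
kept, replace_with = {}, {}
for name, arr in inits.items():
    for kept_name, kept_arr in kept.items():
        if arr.shape == kept_arr.shape and (arr == kept_arr).all():
            replace_with[name] = kept_name  # rewire consumers of `name`
            break
    else:
        kept[name] = arr

assert replace_with == {"c": "a"}  # "c" duplicates "a" and can be dropped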
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError import requests def _lowerCamelCase( lowercase__ = "isbn/0140328726" ) -> dict: '''simple docstring''' __lowercase= olid.strip().strip('/' ) # Remove leading/trailing whitespace & slashes if new_olid.count('/' ) != 1: __lowercase= F'{olid} is not a valid Open Library olid' raise ValueError(lowercase__ ) return requests.get(F'https://openlibrary.org/{new_olid}.json' ).json() def _lowerCamelCase( lowercase__ ) -> dict: '''simple docstring''' __lowercase= { 'title': 'Title', 'publish_date': 'Publish date', 'authors': 'Authors', 'number_of_pages': 'Number of pages:', 'first_sentence': 'First sentence', 'isbn_10': 'ISBN (10)', 'isbn_13': 'ISBN (13)', } __lowercase= {better_key: ol_book_data[key] for key, better_key in desired_keys.items()} __lowercase= [ get_openlibrary_data(author['key'] )['name'] for author in data['Authors'] ] __lowercase= data['First sentence']['value'] for key, value in data.items(): if isinstance(lowercase__ , lowercase__ ): __lowercase= ', '.join(lowercase__ ) return data if __name__ == "__main__": import doctest doctest.testmod() while True: lowerCAmelCase = input('''\nEnter the ISBN code to search (or \'quit\' to stop): ''').strip() if isbn.lower() in ("", "q", "quit", "exit", "stop"): break if len(isbn) not in (1_0, 1_3) or not isbn.isdigit(): print(F'Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.') continue print(F'\nSearching Open Library for ISBN: {isbn}...\n') try: lowerCAmelCase = summarize_book(get_openlibrary_data(F'isbn/{isbn}')) print('''\n'''.join(F'{key}: {value}' for key, value in book_summary.items())) except JSONDecodeError: # Workaround for requests.exceptions.RequestException: print(F'Sorry, there are no results for ISBN: {isbn}.')
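# --- Usage sketch (not part of the original file); this performs a live ---
# --- HTTP request against openlibrary.org, so it needs network access. ---
# --- ISBN 0140328726 should resolve to Roald Dahl's "Matilda". ---
# book = summarize_book(get_openlibrary_data("isbn/0140328726"))
# print(book["Title"])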
import argparse import importlib from pathlib import Path # Test all the extensions added in the setup lowerCAmelCase = [ '''kernels/rwkv/wkv_cuda.cu''', '''kernels/rwkv/wkv_op.cpp''', '''kernels/deformable_detr/ms_deform_attn.h''', '''kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh''', '''models/graphormer/algos_graphormer.pyx''', ] def _lowerCamelCase( lowercase__ ) -> str: '''simple docstring''' for file in FILES_TO_FIND: if not (transformers_path / file).exists(): return False return True if __name__ == "__main__": lowerCAmelCase = argparse.ArgumentParser() parser.add_argument('''--check_lib''', action='''store_true''', help='''Whether to check the build or the actual package.''') lowerCAmelCase = parser.parse_args() if args.check_lib: lowerCAmelCase = importlib.import_module('''transformers''') lowerCAmelCase = Path(transformers_module.__file__).parent else: lowerCAmelCase = Path.cwd() / '''build/lib/transformers''' if not test_custom_files_are_present(transformers_path): raise ValueError('''The built release does not contain the custom files. Fix this before going further!''')
ROMAN = [
    (1000, "M"),
    (900, "CM"),
    (500, "D"),
    (400, "CD"),
    (100, "C"),
    (90, "XC"),
    (50, "L"),
    (40, "XL"),
    (10, "X"),
    (9, "IX"),
    (5, "V"),
    (4, "IV"),
    (1, "I"),
]


def roman_to_int(roman: str) -> int:
    """Converts a roman numeral to an integer, handling subtractive notation."""
    vals = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
    total = 0
    place = 0
    while place < len(roman):
        if (place + 1 < len(roman)) and (vals[roman[place]] < vals[roman[place + 1]]):
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total


def int_to_roman(number: int) -> str:
    """Converts a positive integer to its roman-numeral string."""
    result = []
    for arabic, roman in ROMAN:
        factor, number = divmod(number, arabic)
        result.append(roman * factor)
        if number == 0:
            break
    return "".join(result)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
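# --- Worked examples (not part of the original file), checked by hand ---
# --- against the subtractive-notation table above. ---
assert roman_to_int("MCMXCIV") == 1994   # M + CM + XC + IV
assert int_to_roman(3549) == "MMMDXLIX"  # 3000 + 500 + 40 + 9
assert roman_to_int(int_to_roman(2024)) == 2024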
from __future__ import annotations


def rec_insertion_sort(collection: list, n: int) -> None:
    """Recursively sorts `collection[:n]` in place."""
    if len(collection) <= 1 or n <= 1:
        return
    insert_next(collection, n - 1)
    rec_insertion_sort(collection, n - 1)


def insert_next(collection: list, index: int) -> None:
    """Bubbles collection[index - 1] forward until the adjacent pair is ordered."""
    if index >= len(collection) or collection[index - 1] <= collection[index]:
        return
    # Swaps adjacent elements since they are not in ascending order
    collection[index - 1], collection[index] = (
        collection[index],
        collection[index - 1],
    )
    insert_next(collection, index + 1)


if __name__ == "__main__":
    numbers = input("Enter integers separated by spaces: ")
    number_list = [int(num) for num in numbers.split()]
    rec_insertion_sort(number_list, len(number_list))
    print(number_list)
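# --- Quick check (not part of the original file): the recursion sorts ---
# --- in place. ---
data = [5, 3, 8, 1, 2]
rec_insertion_sort(data, len(data))
assert data == [1, 2, 3, 5, 8]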
import unittest import numpy as np from transformers import DistilBertConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.distilbert.modeling_flax_distilbert import ( FlaxDistilBertForMaskedLM, FlaxDistilBertForMultipleChoice, FlaxDistilBertForQuestionAnswering, FlaxDistilBertForSequenceClassification, FlaxDistilBertForTokenClassification, FlaxDistilBertModel, ) class A ( unittest.TestCase ): def __init__(self , lowerCAmelCase , lowerCAmelCase=1_3 , lowerCAmelCase=7 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=9_9 , lowerCAmelCase=3_2 , lowerCAmelCase=5 , lowerCAmelCase=4 , lowerCAmelCase=3_7 , lowerCAmelCase="gelu" , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=5_1_2 , lowerCAmelCase=1_6 , lowerCAmelCase=2 , lowerCAmelCase=0.02 , lowerCAmelCase=4 , ): __lowercase= parent __lowercase= batch_size __lowercase= seq_length __lowercase= is_training __lowercase= use_attention_mask __lowercase= use_token_type_ids __lowercase= use_labels __lowercase= vocab_size __lowercase= hidden_size __lowercase= num_hidden_layers __lowercase= num_attention_heads __lowercase= intermediate_size __lowercase= hidden_act __lowercase= hidden_dropout_prob __lowercase= attention_probs_dropout_prob __lowercase= max_position_embeddings __lowercase= type_vocab_size __lowercase= type_sequence_label_size __lowercase= initializer_range __lowercase= num_choices def _A (self ): __lowercase= ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowercase= None if self.use_attention_mask: __lowercase= random_attention_mask([self.batch_size, self.seq_length] ) __lowercase= DistilBertConfig( vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , tie_weights_=lowerCAmelCase , ) return config, input_ids, attention_mask def _A (self ): __lowercase= self.prepare_config_and_inputs() __lowercase, __lowercase, __lowercase= config_and_inputs __lowercase= {'input_ids': input_ids, 'attention_mask': attention_mask} return config, inputs_dict @require_flax class A ( A_ , unittest.TestCase ): UpperCamelCase_ : Optional[int] =( ( FlaxDistilBertModel, FlaxDistilBertForMaskedLM, FlaxDistilBertForMultipleChoice, FlaxDistilBertForQuestionAnswering, FlaxDistilBertForSequenceClassification, FlaxDistilBertForTokenClassification, FlaxDistilBertForQuestionAnswering, ) if is_flax_available() else () ) def _A (self ): __lowercase= FlaxDistilBertModelTester(self ) @slow def _A (self ): for model_class_name in self.all_model_classes: __lowercase= model_class_name.from_pretrained('distilbert-base-uncased' ) __lowercase= model(np.ones((1, 1) ) ) self.assertIsNotNone(lowerCAmelCase ) @require_flax class A ( unittest.TestCase ): @slow def _A (self ): __lowercase= FlaxDistilBertModel.from_pretrained('distilbert-base-uncased' ) __lowercase= np.array([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] ) __lowercase= np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) __lowercase= model(lowerCAmelCase , attention_mask=lowerCAmelCase )[0] __lowercase= 
(1, 1_1, 7_6_8) self.assertEqual(output.shape , lowerCAmelCase ) __lowercase= np.array([[[-0.16_39, 0.32_99, 0.16_48], [-0.17_46, 0.32_89, 0.17_10], [-0.18_84, 0.33_57, 0.18_10]]] ) self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , lowerCAmelCase , atol=1E-4 ) )
def split(string: str, separator: str = " ") -> list:
    """Splits `string` on `separator` without producing a trailing empty piece."""
    split_words = []
    last_index = 0
    for index, char in enumerate(string):
        if char == separator:
            split_words.append(string[last_index:index])
            last_index = index + 1
        elif index + 1 == len(string):
            split_words.append(string[last_index : index + 1])
    return split_words


if __name__ == "__main__":
    from doctest import testmod

    testmod()
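# --- Worked examples (not part of the original file). Note that, unlike ---
# --- str.split, a trailing separator does not produce a trailing "". ---
assert split("apple#banana#cherry", "#") == ["apple", "banana", "cherry"]
assert split("Hello there") == ["Hello", "there"]
assert split("12:43:39", ":") == ["12", "43", "39"]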
import argparse import json import os import torch from torch import nn from transformers import NllbMoeConfig, NllbMoeModel from transformers.modeling_utils import dtype_byte_size from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME def _lowerCamelCase( lowercase__ ) -> Optional[Any]: '''simple docstring''' __lowercase= [ 'encoder.version', 'decoder.version', 'model.encoder.version', 'model.decoder.version', 'decoder.output_projection.weight', '_float_tensor', 'encoder.embed_positions._float_tensor', 'decoder.embed_positions._float_tensor', ] for k in ignore_keys: state_dict.pop(lowercase__ , lowercase__ ) def _lowerCamelCase( lowercase__ ) -> str: '''simple docstring''' __lowercase, __lowercase= emb.weight.shape __lowercase= nn.Linear(lowercase__ , lowercase__ , bias=lowercase__ ) __lowercase= emb.weight.data return lin_layer def _lowerCamelCase( lowercase__ , lowercase__=None ) -> Optional[Any]: '''simple docstring''' __lowercase= {} for old_key in state_dict.keys(): __lowercase= old_key if "moe_layer.experts." in key: if expert_idx is not None: __lowercase= key.replace('moe_layer.experts.0' , F'ffn.experts.expert_{expert_idx}' ) else: __lowercase= key.replace('moe_layer.experts.' , 'ffn.experts.expert_' ) if "gate" in key: __lowercase= key.replace('.moe_layer.gate.wg' , '.ffn.router.classifier' ) if "fc2" and "experts" not in key: __lowercase= key.replace('.fc2.' , '.ffn.fc2.' ) if "fc1" and "experts" not in key: __lowercase= key.replace('.fc1.' , '.ffn.fc1.' ) if ".encoder_attn." in key: __lowercase= key.replace('.encoder_attn.' , '.cross_attention.' ) if "encoder_attn_layer_norm" in key: __lowercase= key.replace('encoder_attn_layer_norm' , 'cross_attention_layer_norm' ) if "final_layer_norm" in key: __lowercase= key.replace('final_layer_norm' , 'ff_layer_norm' ) __lowercase= state_dict[old_key] return new_dict def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = WEIGHTS_NAME ) -> Tuple: '''simple docstring''' __lowercase= [] __lowercase= 0 os.makedirs(lowercase__ , exist_ok=lowercase__ ) for expert in range(lowercase__ ): __lowercase= switch_checkpoint_path + F'-rank-{expert}.pt' if os.path.isfile(lowercase__ ): __lowercase= torch.load(lowercase__ )['model'] remove_ignore_keys_(lowercase__ ) __lowercase= rename_fairseq_keys(lowercase__ , lowercase__ ) __lowercase= os.path.join( lowercase__ , weights_name.replace('.bin' , F'-{len(lowercase__ )+1:05d}-of-???.bin' ) ) torch.save(lowercase__ , lowercase__ ) sharded_state_dicts.append(expert_state.keys() ) total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size( expert_state[list(lowercase__ )[0]].dtype ) # Add the last block __lowercase= os.path.join(lowercase__ , weights_name.replace('.bin' , F'-{len(lowercase__ )+1:05d}-of-???.bin' ) ) __lowercase= torch.load(switch_checkpoint_path + '-shared.pt' )['model'] remove_ignore_keys_(lowercase__ ) __lowercase= rename_fairseq_keys(lowercase__ , lowercase__ ) __lowercase= shared_weights['decoder.embed_tokens.weight'] sharded_state_dicts.append(shared_weights.keys() ) # If we only have the shared weights (dummy model/experts saved on the same file) if len(lowercase__ ) == 1: __lowercase= os.path.join(lowercase__ , lowercase__ ) torch.save(lowercase__ , lowercase__ ) return {weights_name: sharded_state_dicts[0]}, None else: torch.save(lowercase__ , lowercase__ ) # Otherwise, let's build the index __lowercase= {} for idx, shard in enumerate(lowercase__ ): __lowercase= weights_name.replace('.bin' , 
F'-{idx+1:05d}-of-{len(lowercase__ ):05d}.bin' ) __lowercase= os.path.join(lowercase__ , weights_name.replace('.bin' , F'-{idx+1:05d}-of-???.bin' ) ) os.rename(lowercase__ , os.path.join(lowercase__ , lowercase__ ) ) for key in shard: __lowercase= shard_file # Add the metadata __lowercase= {'total_size': total_size} __lowercase= {'metadata': metadata, 'weight_map': weight_map} with open(os.path.join(lowercase__ , lowercase__ ) , 'w' , encoding='utf-8' ) as f: __lowercase= json.dumps(lowercase__ , indent=2 , sort_keys=lowercase__ ) + '\n' f.write(lowercase__ ) return metadata, index if __name__ == "__main__": lowerCAmelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--nllb_moe_checkpoint_path''', default='''/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000''', type=str, required=False, help='''Path to a directory containing a folder per layer. Follows the original Google format.''', ) parser.add_argument('''--dtype''', default='''float32''', type=str, required=False, help='''dtype of the saved model''') parser.add_argument( '''--pytorch_dump_folder_path''', default='''/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b''', type=str, required=False, help='''Path to the output pytorch model.''', ) lowerCAmelCase = parser.parse_args() lowerCAmelCase ,lowerCAmelCase = shard_on_the_fly( args.nllb_moe_checkpoint_path, args.pytorch_dump_folder_path, 1_2_8, args.dtype, ) lowerCAmelCase = NllbMoeConfig.from_pretrained( '''facebook/nllb-200-3.3B''', encoder_sparse_step=4, decoder_sparse_step=4, num_experts=1_2_8 ) config.save_pretrained(args.pytorch_dump_folder_path) lowerCAmelCase = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path) print('''Done''') model.save_pretrained(args.pytorch_dump_folder_path)
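# --- Sketch (not part of the original file) of the sharded-checkpoint index ---
# --- the function above writes: a total byte size plus a parameter-name -> ---
# --- shard-file mapping. The file names and sizes below are illustrative. ---
index = {
    "metadata": {"total_size": 21474836480},  # bytes across all shards (made up)
    "weight_map": {
        "encoder.layers.0.ffn.experts.expert_0.fc1.weight": "pytorch_model-00001-of-00002.bin",
        "decoder.embed_tokens.weight": "pytorch_model-00002-of-00002.bin",
    },
}
# Loading torch.load(shard_file) per shard and feeding each piece to the model
# is how such an index is typically consumed.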
import csv from collections import defaultdict from dataclasses import dataclass, field from typing import List, Optional import matplotlib.pyplot as plt import numpy as np from matplotlib.ticker import ScalarFormatter from transformers import HfArgumentParser def _lowerCamelCase( lowercase__=None , lowercase__=None ) -> Dict: '''simple docstring''' return field(default_factory=lambda: default , metadata=lowercase__ ) @dataclass class A : UpperCamelCase_ : str =field( metadata={'''help''': '''The csv file to plot.'''} , ) UpperCamelCase_ : bool =field( default=A_ , metadata={'''help''': '''Whether to plot along batch size or sequence length. Defaults to sequence length.'''} , ) UpperCamelCase_ : bool =field( default=A_ , metadata={'''help''': '''Whether the csv file has time results or memory results. Defaults to memory results.'''} , ) UpperCamelCase_ : bool =field( default=A_ , metadata={'''help''': '''Disable logarithmic scale when plotting'''} , ) UpperCamelCase_ : bool =field( default=A_ , metadata={ '''help''': '''Whether the csv file has training results or inference results. Defaults to inference results.''' } , ) UpperCamelCase_ : Optional[str] =field( default=A_ , metadata={'''help''': '''Filename under which the plot will be saved. If unused no plot is saved.'''} , ) UpperCamelCase_ : Optional[List[str]] =list_field( default=A_ , metadata={'''help''': '''List of model names that are used instead of the ones in the csv file.'''} ) def _lowerCamelCase( lowercase__ ) -> int: '''simple docstring''' try: int(lowercase__ ) return True except ValueError: return False def _lowerCamelCase( lowercase__ ) -> int: '''simple docstring''' try: float(lowercase__ ) return True except ValueError: return False class A : def __init__(self , lowerCAmelCase ): __lowercase= args __lowercase= defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}} ) with open(self.args.csv_file , newline='' ) as csv_file: __lowercase= csv.DictReader(lowerCAmelCase ) for row in reader: __lowercase= row['model'] self.result_dict[model_name]["bsz"].append(int(row['batch_size'] ) ) self.result_dict[model_name]["seq_len"].append(int(row['sequence_length'] ) ) if can_convert_to_int(row['result'] ): # value is not None __lowercase= int(row['result'] ) elif can_convert_to_float(row['result'] ): # value is not None __lowercase= float(row['result'] ) def _A (self ): __lowercase, __lowercase= plt.subplots() __lowercase= 'Time usage' if self.args.is_time else 'Memory usage' __lowercase= title_str + ' for training' if self.args.is_train else title_str + ' for inference' if not self.args.no_log_scale: # set logarithm scales ax.set_xscale('log' ) ax.set_yscale('log' ) for axis in [ax.xaxis, ax.yaxis]: axis.set_major_formatter(ScalarFormatter() ) for model_name_idx, model_name in enumerate(self.result_dict.keys() ): __lowercase= sorted(set(self.result_dict[model_name]['bsz'] ) ) __lowercase= sorted(set(self.result_dict[model_name]['seq_len'] ) ) __lowercase= self.result_dict[model_name]['result'] ((__lowercase), (__lowercase))= ( (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes) ) __lowercase= ( model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx] ) for inner_loop_value in inner_loop_array: if self.args.plot_along_batch: __lowercase= np.asarray( [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results] , dtype=lowerCAmelCase , ) else: __lowercase= np.asarray( [results[(inner_loop_value, x)] for x in 
x_axis_array if (inner_loop_value, x) in results] , dtype=np.floataa , ) ((__lowercase), (__lowercase))= ( ('batch_size', 'len') if self.args.plot_along_batch else ('in #tokens', 'bsz') ) __lowercase= np.asarray(lowerCAmelCase , lowerCAmelCase )[: len(lowerCAmelCase )] plt.scatter( lowerCAmelCase , lowerCAmelCase , label=f'{label_model_name} - {inner_loop_label}: {inner_loop_value}' ) plt.plot(lowerCAmelCase , lowerCAmelCase , '--' ) title_str += f' {label_model_name} vs.' __lowercase= title_str[:-4] __lowercase= 'Time in s' if self.args.is_time else 'Memory in MB' # plot plt.title(lowerCAmelCase ) plt.xlabel(lowerCAmelCase ) plt.ylabel(lowerCAmelCase ) plt.legend() if self.args.figure_png_file is not None: plt.savefig(self.args.figure_png_file ) else: plt.show() def _lowerCamelCase( ) -> str: '''simple docstring''' __lowercase= HfArgumentParser(lowercase__ ) __lowercase= parser.parse_args_into_dataclasses()[0] __lowercase= Plot(args=lowercase__ ) plot.plot() if __name__ == "__main__": main()
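# --- Sketch (not part of the original file) of the CSV layout the Plot ---
# --- class above expects; the model names and values are invented. ---
example_csv = """model,batch_size,sequence_length,result
bert-base-uncased,8,128,0.034
bert-base-uncased,8,512,0.112
gpt2,8,128,0.041
"""
# `result` is read as seconds when is_time is set, otherwise as memory in MB.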
import gc import unittest from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline from diffusers.utils import is_flax_available, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class A ( unittest.TestCase ): def _A (self ): # clean up the VRAM after each test super().tearDown() gc.collect() def _A (self ): __lowercase, __lowercase= FlaxStableDiffusionPipeline.from_pretrained( 'stabilityai/stable-diffusion-2' , revision='bf16' , dtype=jnp.bfloataa , ) __lowercase= 'A painting of a squirrel eating a burger' __lowercase= jax.device_count() __lowercase= num_samples * [prompt] __lowercase= sd_pipe.prepare_inputs(lowerCAmelCase ) __lowercase= replicate(lowerCAmelCase ) __lowercase= shard(lowerCAmelCase ) __lowercase= jax.random.PRNGKey(0 ) __lowercase= jax.random.split(lowerCAmelCase , jax.device_count() ) __lowercase= sd_pipe(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , num_inference_steps=2_5 , jit=lowerCAmelCase )[0] assert images.shape == (jax.device_count(), 1, 7_6_8, 7_6_8, 3) __lowercase= images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) __lowercase= images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1] __lowercase= jnp.asarray(jax.device_get(image_slice.flatten() ) ) __lowercase= jnp.array([0.42_38, 0.44_14, 0.43_95, 0.44_53, 0.46_29, 0.45_90, 0.45_31, 0.4_55_08, 0.45_12] ) print(f'output_slice: {output_slice}' ) assert jnp.abs(output_slice - expected_slice ).max() < 1E-2 def _A (self ): __lowercase= 'stabilityai/stable-diffusion-2' __lowercase, __lowercase= FlaxDPMSolverMultistepScheduler.from_pretrained(lowerCAmelCase , subfolder='scheduler' ) __lowercase, __lowercase= FlaxStableDiffusionPipeline.from_pretrained( lowerCAmelCase , scheduler=lowerCAmelCase , revision='bf16' , dtype=jnp.bfloataa , ) __lowercase= scheduler_params __lowercase= 'A painting of a squirrel eating a burger' __lowercase= jax.device_count() __lowercase= num_samples * [prompt] __lowercase= sd_pipe.prepare_inputs(lowerCAmelCase ) __lowercase= replicate(lowerCAmelCase ) __lowercase= shard(lowerCAmelCase ) __lowercase= jax.random.PRNGKey(0 ) __lowercase= jax.random.split(lowerCAmelCase , jax.device_count() ) __lowercase= sd_pipe(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , num_inference_steps=2_5 , jit=lowerCAmelCase )[0] assert images.shape == (jax.device_count(), 1, 7_6_8, 7_6_8, 3) __lowercase= images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) __lowercase= images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1] __lowercase= jnp.asarray(jax.device_get(image_slice.flatten() ) ) __lowercase= jnp.array([0.43_36, 0.4_29_69, 0.44_53, 0.41_99, 0.42_97, 0.45_31, 0.44_34, 0.44_34, 0.42_97] ) print(f'output_slice: {output_slice}' ) assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
import collections from typing import List, Optional, Union from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging from ..bert.tokenization_bert_fast import BertTokenizerFast from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer lowerCAmelCase = logging.get_logger(__name__) lowerCAmelCase = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''} lowerCAmelCase = { '''vocab_file''': { '''facebook/dpr-ctx_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt''' ), '''facebook/dpr-ctx_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''facebook/dpr-ctx_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json''' ), '''facebook/dpr-ctx_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json''' ), }, } lowerCAmelCase = { '''vocab_file''': { '''facebook/dpr-question_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt''' ), '''facebook/dpr-question_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''facebook/dpr-question_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json''' ), '''facebook/dpr-question_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json''' ), }, } lowerCAmelCase = { '''vocab_file''': { '''facebook/dpr-reader-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt''' ), '''facebook/dpr-reader-multiset-base''': ( '''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''facebook/dpr-reader-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json''' ), '''facebook/dpr-reader-multiset-base''': ( '''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json''' ), }, } lowerCAmelCase = { '''facebook/dpr-ctx_encoder-single-nq-base''': 5_1_2, '''facebook/dpr-ctx_encoder-multiset-base''': 5_1_2, } lowerCAmelCase = { '''facebook/dpr-question_encoder-single-nq-base''': 5_1_2, '''facebook/dpr-question_encoder-multiset-base''': 5_1_2, } lowerCAmelCase = { '''facebook/dpr-reader-single-nq-base''': 5_1_2, '''facebook/dpr-reader-multiset-base''': 5_1_2, } lowerCAmelCase = { '''facebook/dpr-ctx_encoder-single-nq-base''': {'''do_lower_case''': True}, '''facebook/dpr-ctx_encoder-multiset-base''': {'''do_lower_case''': True}, } lowerCAmelCase = { '''facebook/dpr-question_encoder-single-nq-base''': {'''do_lower_case''': True}, '''facebook/dpr-question_encoder-multiset-base''': {'''do_lower_case''': True}, } lowerCAmelCase = { '''facebook/dpr-reader-single-nq-base''': {'''do_lower_case''': True}, '''facebook/dpr-reader-multiset-base''': {'''do_lower_case''': True}, } class A ( A_ ): UpperCamelCase_ : List[Any] =VOCAB_FILES_NAMES UpperCamelCase_ : Dict =CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP UpperCamelCase_ : List[Any] 
=CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase_ : Optional[int] =CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION UpperCamelCase_ : int =DPRContextEncoderTokenizer class A ( A_ ): UpperCamelCase_ : Any =VOCAB_FILES_NAMES UpperCamelCase_ : List[str] =QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP UpperCamelCase_ : Optional[Any] =QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase_ : Optional[Any] =QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION UpperCamelCase_ : List[Any] =DPRQuestionEncoderTokenizer lowerCAmelCase = collections.namedtuple( '''DPRSpanPrediction''', ['''span_score''', '''relevance_score''', '''doc_id''', '''start_index''', '''end_index''', '''text'''] ) lowerCAmelCase = collections.namedtuple('''DPRReaderOutput''', ['''start_logits''', '''end_logits''', '''relevance_logits''']) lowerCAmelCase = R''' Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`. It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers), using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)` with the format: [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids> Args: questions (`str` or `List[str]`): The questions to be encoded. You can specify one question for many passages. In this case, the question will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in `titles` or `texts`. titles (`str` or `List[str]`): The passages titles to be encoded. This can be a string or a list of strings if there are several passages. texts (`str` or `List[str]`): The passages texts to be encoded. This can be a string or a list of strings if there are several passages. padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`): Activates and controls padding. Accepts the following values: - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different lengths). truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`): Activates and controls truncation. Accepts the following values: - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided. - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided. 
- `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size). max_length (`int`, *optional*): Controls the maximum length to use by one of the truncation/padding parameters. If left unset or set to `None`, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated. return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors instead of list of python integers. Acceptable values are: - `\'tf\'`: Return TensorFlow `tf.constant` objects. - `\'pt\'`: Return PyTorch `torch.Tensor` objects. - `\'np\'`: Return Numpy `np.ndarray` objects. return_attention_mask (`bool`, *optional*): Whether or not to return the attention mask. If not set, will return the attention mask according to the specific tokenizer\'s default, defined by the `return_outputs` attribute. [What are attention masks?](../glossary#attention-mask) Return: `Dict[str, List[List[int]]]`: A dictionary with the following keys: - `input_ids`: List of token ids to be fed to a model. - `attention_mask`: List of indices specifying which tokens should be attended to by the model. ''' @add_start_docstrings(A_ ) class A : def __call__(self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = False , lowerCAmelCase = False , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , **lowerCAmelCase , ): if titles is None and texts is None: return super().__call__( lowerCAmelCase , padding=lowerCAmelCase , truncation=lowerCAmelCase , max_length=lowerCAmelCase , return_tensors=lowerCAmelCase , return_attention_mask=lowerCAmelCase , **lowerCAmelCase , ) elif titles is None or texts is None: __lowercase= titles if texts is None else texts return super().__call__( lowerCAmelCase , lowerCAmelCase , padding=lowerCAmelCase , truncation=lowerCAmelCase , max_length=lowerCAmelCase , return_tensors=lowerCAmelCase , return_attention_mask=lowerCAmelCase , **lowerCAmelCase , ) __lowercase= titles if not isinstance(lowerCAmelCase , lowerCAmelCase ) else [titles] __lowercase= texts if not isinstance(lowerCAmelCase , lowerCAmelCase ) else [texts] __lowercase= len(lowerCAmelCase ) __lowercase= questions if not isinstance(lowerCAmelCase , lowerCAmelCase ) else [questions] * n_passages assert len(lowerCAmelCase ) == len( lowerCAmelCase ), f'There should be as many titles than texts but got {len(lowerCAmelCase )} titles and {len(lowerCAmelCase )} texts.' 
__lowercase= super().__call__(lowerCAmelCase , lowerCAmelCase , padding=lowerCAmelCase , truncation=lowerCAmelCase )['input_ids'] __lowercase= super().__call__(lowerCAmelCase , add_special_tokens=lowerCAmelCase , padding=lowerCAmelCase , truncation=lowerCAmelCase )['input_ids'] __lowercase= { 'input_ids': [ (encoded_question_and_title + encoded_text)[:max_length] if max_length is not None and truncation else encoded_question_and_title + encoded_text for encoded_question_and_title, encoded_text in zip(lowerCAmelCase , lowerCAmelCase ) ] } if return_attention_mask is not False: __lowercase= [] for input_ids in encoded_inputs["input_ids"]: attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] ) __lowercase= attention_mask return self.pad(lowerCAmelCase , padding=lowerCAmelCase , max_length=lowerCAmelCase , return_tensors=lowerCAmelCase ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = 1_6 , lowerCAmelCase = 6_4 , lowerCAmelCase = 4 , ): __lowercase= reader_input['input_ids'] __lowercase, __lowercase, __lowercase= reader_output[:3] __lowercase= len(lowerCAmelCase ) __lowercase= sorted(range(lowerCAmelCase ) , reverse=lowerCAmelCase , key=relevance_logits.__getitem__ ) __lowercase= [] for doc_id in sorted_docs: __lowercase= list(input_ids[doc_id] ) # assuming question & title information is at the beginning of the sequence __lowercase= sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id if sequence_ids[-1] == self.pad_token_id: __lowercase= sequence_ids.index(self.pad_token_id ) else: __lowercase= len(lowerCAmelCase ) __lowercase= self._get_best_spans( start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=lowerCAmelCase , top_spans=lowerCAmelCase , ) for start_index, end_index in best_spans: start_index += passage_offset end_index += passage_offset nbest_spans_predictions.append( DPRSpanPrediction( span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=lowerCAmelCase , start_index=lowerCAmelCase , end_index=lowerCAmelCase , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) ) if len(lowerCAmelCase ) >= num_spans: break return nbest_spans_predictions[:num_spans] def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ): __lowercase= [] for start_index, start_score in enumerate(lowerCAmelCase ): for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ): scores.append(((start_index, start_index + answer_length), start_score + end_score) ) __lowercase= sorted(lowerCAmelCase , key=lambda lowerCAmelCase : x[1] , reverse=lowerCAmelCase ) __lowercase= [] for (start_index, end_index), score in scores: assert start_index <= end_index, f'Wrong span indices: [{start_index}:{end_index}]' __lowercase= end_index - start_index + 1 assert length <= max_answer_length, f'Span is too long: {length} > {max_answer_length}' if any( start_index <= prev_start_index <= prev_end_index <= end_index or prev_start_index <= start_index <= end_index <= prev_end_index for (prev_start_index, prev_end_index) in chosen_span_intervals ): continue chosen_span_intervals.append((start_index, end_index) ) if len(lowerCAmelCase ) == top_spans: break return chosen_span_intervals @add_end_docstrings(A_ ) class A ( A_ , A_ ): UpperCamelCase_ : Optional[int] =VOCAB_FILES_NAMES UpperCamelCase_ : List[str] 
=READER_PRETRAINED_VOCAB_FILES_MAP UpperCamelCase_ : Dict =READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase_ : Optional[Any] =READER_PRETRAINED_INIT_CONFIGURATION UpperCamelCase_ : Union[str, Any] =['''input_ids''', '''attention_mask'''] UpperCamelCase_ : Dict =DPRReaderTokenizer
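# --- Minimal sketch (not part of the original file) of the span selection ---
# --- in _get_best_spans above: score every (start, end) pair with ---
# --- start_logit + end_logit, bounded by a maximum answer length. ---
start_logits = [0.1, 2.0, 0.3, 0.0]  # toy per-token logits
end_logits = [0.0, 0.2, 1.5, 0.1]
max_answer_length = 3

scores = []
for s, s_score in enumerate(start_logits):
    for length, e_score in enumerate(end_logits[s : s + max_answer_length]):
        scores.append(((s, s + length), s_score + e_score))
scores.sort(key=lambda x: x[1], reverse=True)
assert scores[0][0] == (1, 2)  # tokens 1..2 maximize start + end score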
import math


class SelfOrganizingMap:
    def get_winner(self, weights: list, sample: list) -> int:
        """Computes the winning cluster index from squared Euclidean distances."""
        d_0 = 0.0
        d_1 = 0.0
        for i in range(len(sample)):
            d_0 += math.pow((sample[i] - weights[0][i]), 2)
            d_1 += math.pow((sample[i] - weights[1][i]), 2)
        return 0 if d_0 > d_1 else 1

    def update(self, weights: list, sample: list, j: int, alpha: float) -> list:
        """Moves the winning vector `j` toward the sample by learning rate alpha."""
        for i in range(len(sample)):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights


def main() -> None:
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]

    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]

    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5

    for _ in range(epochs):
        for j in range(len(training_samples)):
            # training sample
            sample = training_samples[j]
            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights, sample)
            # Update the winning vector
            weights = self_organizing_map.update(weights, sample, winner, alpha)

    # classify test sample
    sample = [0, 0, 1, 1]
    winner = self_organizing_map.get_winner(weights, sample)

    # results
    print(f"Clusters that the test sample belongs to : {winner}")
    print(f"Weights that have been trained : {weights}")


# running the main() function
if __name__ == "__main__":
    main()
import inspect import unittest import torch import torch.nn as nn from accelerate.hooks import ( AlignDevicesHook, ModelHook, SequentialHook, add_hook_to_module, attach_align_device_hook, remove_hook_from_module, remove_hook_from_submodules, ) from accelerate.test_utils import require_multi_gpu class A ( nn.Module ): def __init__(self ): super().__init__() __lowercase= nn.Linear(3 , 4 ) __lowercase= nn.BatchNormad(4 ) __lowercase= nn.Linear(4 , 5 ) def _A (self , lowerCAmelCase ): return self.lineara(self.batchnorm(self.lineara(lowerCAmelCase ) ) ) class A ( A_ ): def _A (self , lowerCAmelCase , *lowerCAmelCase , **lowerCAmelCase ): return (args[0] + 1,) + args[1:], kwargs class A ( A_ ): def _A (self , lowerCAmelCase , lowerCAmelCase ): return output + 1 class A ( unittest.TestCase ): def _A (self ): __lowercase= ModelForTest() __lowercase= ModelHook() add_hook_to_module(lowerCAmelCase , lowerCAmelCase ) self.assertEqual(test_model._hf_hook , lowerCAmelCase ) self.assertTrue(hasattr(lowerCAmelCase , '_old_forward' ) ) # Check adding the hook did not change the name or the signature self.assertEqual(test_model.forward.__name__ , 'forward' ) self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ['x'] ) remove_hook_from_module(lowerCAmelCase ) self.assertFalse(hasattr(lowerCAmelCase , '_hf_hook' ) ) self.assertFalse(hasattr(lowerCAmelCase , '_old_forward' ) ) def _A (self ): __lowercase= ModelForTest() __lowercase= ModelHook() add_hook_to_module(lowerCAmelCase , lowerCAmelCase ) add_hook_to_module(lowerCAmelCase , lowerCAmelCase , append=lowerCAmelCase ) self.assertEqual(isinstance(test_model._hf_hook , lowerCAmelCase ) , lowerCAmelCase ) self.assertEqual(len(test_model._hf_hook.hooks ) , 2 ) self.assertTrue(hasattr(lowerCAmelCase , '_old_forward' ) ) # Check adding the hook did not change the name or the signature self.assertEqual(test_model.forward.__name__ , 'forward' ) self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ['x'] ) remove_hook_from_module(lowerCAmelCase ) self.assertFalse(hasattr(lowerCAmelCase , '_hf_hook' ) ) self.assertFalse(hasattr(lowerCAmelCase , '_old_forward' ) ) def _A (self ): __lowercase= ModelForTest() __lowercase= torch.randn(2 , 3 ) __lowercase= test_model(x + 1 ) __lowercase= test_model(x + 2 ) __lowercase= PreForwardHook() add_hook_to_module(lowerCAmelCase , lowerCAmelCase ) __lowercase= test_model(lowerCAmelCase ) self.assertTrue(torch.allclose(lowerCAmelCase , lowerCAmelCase , atol=1E-5 ) ) # Attaching a hook to a model when it already has one replaces, does not chain __lowercase= PreForwardHook() add_hook_to_module(lowerCAmelCase , lowerCAmelCase ) __lowercase= test_model(lowerCAmelCase ) self.assertTrue(torch.allclose(lowerCAmelCase , lowerCAmelCase , atol=1E-5 ) ) # You need to use the sequential hook to chain two or more hooks __lowercase= SequentialHook(PreForwardHook() , PreForwardHook() ) add_hook_to_module(lowerCAmelCase , lowerCAmelCase ) __lowercase= test_model(lowerCAmelCase ) assert torch.allclose(lowerCAmelCase , lowerCAmelCase , atol=1E-5 ) def _A (self ): __lowercase= ModelForTest() __lowercase= torch.randn(2 , 3 ) __lowercase= test_model(lowerCAmelCase ) __lowercase= PostForwardHook() add_hook_to_module(lowerCAmelCase , lowerCAmelCase ) __lowercase= test_model(lowerCAmelCase ) self.assertTrue(torch.allclose(lowerCAmelCase , output + 1 , atol=1E-5 ) ) # Attaching a hook to a model when it already has one replaces, does not chain __lowercase= PostForwardHook() 
add_hook_to_module(lowerCAmelCase , lowerCAmelCase ) __lowercase= test_model(lowerCAmelCase ) self.assertTrue(torch.allclose(lowerCAmelCase , output + 1 , atol=1E-5 ) ) # You need to use the sequential hook to chain two or more hooks __lowercase= SequentialHook(PostForwardHook() , PostForwardHook() ) add_hook_to_module(lowerCAmelCase , lowerCAmelCase ) __lowercase= test_model(lowerCAmelCase ) assert torch.allclose(lowerCAmelCase , output + 2 , atol=1E-5 ) def _A (self ): __lowercase= ModelForTest() __lowercase= torch.randn(2 , 3 ) __lowercase= test_model(lowerCAmelCase ) __lowercase= PostForwardHook() add_hook_to_module(lowerCAmelCase , lowerCAmelCase ) __lowercase= test_model(lowerCAmelCase ) self.assertTrue(torch.allclose(lowerCAmelCase , output + 1 ) ) self.assertTrue(outputa.requires_grad ) __lowercase= True __lowercase= test_model(lowerCAmelCase ) self.assertFalse(outputa.requires_grad ) @require_multi_gpu def _A (self ): __lowercase= ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) # This will move each submodule on different devices add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) ) add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) ) add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) ) self.assertEqual(model.lineara.weight.device , torch.device(0 ) ) self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) ) self.assertEqual(model.lineara.weight.device , torch.device(1 ) ) # We can still make a forward pass. The input does not need to be on any particular device __lowercase= torch.randn(2 , 3 ) __lowercase= model(lowerCAmelCase ) self.assertEqual(output.device , torch.device(1 ) ) # We can add a general hook to put back output on same device as input. 
add_hook_to_module(lowerCAmelCase , AlignDevicesHook(io_same_device=lowerCAmelCase ) ) __lowercase= torch.randn(2 , 3 ).to(0 ) __lowercase= model(lowerCAmelCase ) self.assertEqual(output.device , torch.device(0 ) ) def _A (self ): __lowercase= ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) # This will move each submodule on different devices __lowercase= {'execution_device': 0 if torch.cuda.is_available() else 'cpu', 'offload': True} add_hook_to_module(model.lineara , AlignDevicesHook(**lowerCAmelCase ) ) add_hook_to_module(model.batchnorm , AlignDevicesHook(**lowerCAmelCase ) ) add_hook_to_module(model.lineara , AlignDevicesHook(**lowerCAmelCase ) ) # Parameters have been offloaded, so on the meta device self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) ) self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) # Buffers are not included in the offload by default, so are on the execution device __lowercase= torch.device(hook_kwargs['execution_device'] ) self.assertEqual(model.batchnorm.running_mean.device , lowerCAmelCase ) __lowercase= torch.randn(2 , 3 ) __lowercase= model(lowerCAmelCase ) self.assertEqual(output.device , lowerCAmelCase ) # Removing hooks loads back the weights in the model. remove_hook_from_module(model.lineara ) remove_hook_from_module(model.batchnorm ) remove_hook_from_module(model.lineara ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) # Now test with buffers included in the offload __lowercase= { 'execution_device': 0 if torch.cuda.is_available() else 'cpu', 'offload': True, 'offload_buffers': True, } add_hook_to_module(model.lineara , AlignDevicesHook(**lowerCAmelCase ) ) add_hook_to_module(model.batchnorm , AlignDevicesHook(**lowerCAmelCase ) ) add_hook_to_module(model.lineara , AlignDevicesHook(**lowerCAmelCase ) ) # Parameters have been offloaded, so on the meta device, buffers included self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) ) self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device('meta' ) ) __lowercase= torch.randn(2 , 3 ) __lowercase= model(lowerCAmelCase ) self.assertEqual(output.device , lowerCAmelCase ) # Removing hooks loads back the weights in the model. 
remove_hook_from_module(model.lineara ) remove_hook_from_module(model.batchnorm ) remove_hook_from_module(model.lineara ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) def _A (self ): __lowercase= ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) # This will move each submodule on different devices __lowercase= 0 if torch.cuda.is_available() else 'cpu' attach_align_device_hook(lowerCAmelCase , execution_device=lowerCAmelCase , offload=lowerCAmelCase ) # Parameters have been offloaded, so on the meta device self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) ) self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) # Buffers are not included in the offload by default, so are on the execution device __lowercase= torch.device(lowerCAmelCase ) self.assertEqual(model.batchnorm.running_mean.device , lowerCAmelCase ) __lowercase= torch.randn(2 , 3 ) __lowercase= model(lowerCAmelCase ) self.assertEqual(output.device , lowerCAmelCase ) # Removing hooks loads back the weights in the model. remove_hook_from_submodules(lowerCAmelCase ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) # Now test with buffers included in the offload attach_align_device_hook(lowerCAmelCase , execution_device=lowerCAmelCase , offload=lowerCAmelCase , offload_buffers=lowerCAmelCase ) # Parameters have been offloaded, so on the meta device, buffers included self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) ) self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device('meta' ) ) __lowercase= torch.randn(2 , 3 ) __lowercase= model(lowerCAmelCase ) self.assertEqual(output.device , lowerCAmelCase ) # Removing hooks loads back the weights in the model. 
remove_hook_from_submodules(lowerCAmelCase ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) def _A (self ): __lowercase= ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) # This will move each submodule on different devices __lowercase= 0 if torch.cuda.is_available() else 'cpu' attach_align_device_hook( lowerCAmelCase , execution_device=lowerCAmelCase , offload=lowerCAmelCase , weights_map=model.state_dict() ) # Parameters have been offloaded, so on the meta device self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) ) self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) # Buffers are not included in the offload by default, so are on the execution device __lowercase= torch.device(lowerCAmelCase ) self.assertEqual(model.batchnorm.running_mean.device , lowerCAmelCase ) __lowercase= torch.randn(2 , 3 ) __lowercase= model(lowerCAmelCase ) self.assertEqual(output.device , lowerCAmelCase ) # Removing hooks loads back the weights in the model. remove_hook_from_submodules(lowerCAmelCase ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) # Now test with buffers included in the offload attach_align_device_hook( lowerCAmelCase , execution_device=lowerCAmelCase , offload=lowerCAmelCase , weights_map=model.state_dict() , offload_buffers=lowerCAmelCase , ) # Parameters have been offloaded, so on the meta device, buffers included self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) ) self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device('meta' ) ) __lowercase= torch.randn(2 , 3 ) __lowercase= model(lowerCAmelCase ) self.assertEqual(output.device , lowerCAmelCase ) # Removing hooks loads back the weights in the model. remove_hook_from_submodules(lowerCAmelCase ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
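# A minimal, self-contained sketch of the hook mechanics exercised by the
# tests above, written against the public `accelerate.hooks` API; the toy
# module and the +1 post-forward shift are illustrative, not part of the
# library.
import torch
from torch import nn
from accelerate.hooks import ModelHook, add_hook_to_module, remove_hook_from_module


class AddOneHook(ModelHook):
    # Runs after every forward pass and shifts the output by one.
    def post_forward(self, module, output):
        return output + 1


linear = nn.Linear(3, 4)
x = torch.randn(2, 3)
baseline = linear(x)

add_hook_to_module(linear, AddOneHook())
assert torch.allclose(linear(x), baseline + 1, atol=1e-5)

remove_hook_from_module(linear)  # restores the original forward
assert torch.allclose(linear(x), baseline, atol=1e-5)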
from typing import Dict, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_torch_available, is_torch_tensor, logging if is_torch_available(): import torch lowerCAmelCase = logging.get_logger(__name__) class A ( A_ ): UpperCamelCase_ : List[str] =['''pixel_values'''] def __init__(self , lowerCAmelCase = True , lowerCAmelCase = None , lowerCAmelCase = PILImageResampling.BILINEAR , lowerCAmelCase = True , lowerCAmelCase = None , lowerCAmelCase = True , lowerCAmelCase = 1 / 2_5_5 , lowerCAmelCase = True , lowerCAmelCase = None , lowerCAmelCase = None , **lowerCAmelCase , ): super().__init__(**lowerCAmelCase ) __lowercase= size if size is not None else {'shortest_edge': 2_5_6} __lowercase= get_size_dict(lowerCAmelCase , default_to_square=lowerCAmelCase ) __lowercase= crop_size if crop_size is not None else {'height': 2_2_4, 'width': 2_2_4} __lowercase= get_size_dict(lowerCAmelCase , param_name='crop_size' ) __lowercase= do_resize __lowercase= size __lowercase= resample __lowercase= do_center_crop __lowercase= crop_size __lowercase= do_rescale __lowercase= rescale_factor __lowercase= do_normalize __lowercase= image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN __lowercase= image_std if image_std is not None else IMAGENET_STANDARD_STD def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = PILImageResampling.BICUBIC , lowerCAmelCase = None , **lowerCAmelCase , ): __lowercase= get_size_dict(lowerCAmelCase , default_to_square=lowerCAmelCase ) if "shortest_edge" not in size: raise ValueError(f'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' ) __lowercase= get_resize_output_image_size(lowerCAmelCase , size=size['shortest_edge'] , default_to_square=lowerCAmelCase ) return resize(lowerCAmelCase , size=lowerCAmelCase , resample=lowerCAmelCase , data_format=lowerCAmelCase , **lowerCAmelCase ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None , **lowerCAmelCase , ): __lowercase= get_size_dict(lowerCAmelCase ) if "height" not in size or "width" not in size: raise ValueError(f'The `size` parameter must contain the keys `height` and `width`. 
Got {size.keys()}' ) return center_crop(lowerCAmelCase , size=(size['height'], size['width']) , data_format=lowerCAmelCase , **lowerCAmelCase ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None , **lowerCAmelCase ): return rescale(lowerCAmelCase , scale=lowerCAmelCase , data_format=lowerCAmelCase , **lowerCAmelCase ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None , **lowerCAmelCase , ): return normalize(lowerCAmelCase , mean=lowerCAmelCase , std=lowerCAmelCase , data_format=lowerCAmelCase , **lowerCAmelCase ) def _A (self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = ChannelDimension.FIRST , **lowerCAmelCase , ): __lowercase= do_resize if do_resize is not None else self.do_resize __lowercase= size if size is not None else self.size __lowercase= get_size_dict(lowerCAmelCase , default_to_square=lowerCAmelCase ) __lowercase= resample if resample is not None else self.resample __lowercase= do_center_crop if do_center_crop is not None else self.do_center_crop __lowercase= crop_size if crop_size is not None else self.crop_size __lowercase= get_size_dict(lowerCAmelCase , param_name='crop_size' ) __lowercase= do_rescale if do_rescale is not None else self.do_rescale __lowercase= rescale_factor if rescale_factor is not None else self.rescale_factor __lowercase= do_normalize if do_normalize is not None else self.do_normalize __lowercase= image_mean if image_mean is not None else self.image_mean __lowercase= image_std if image_std is not None else self.image_std __lowercase= make_list_of_images(lowerCAmelCase ) if not valid_images(lowerCAmelCase ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_resize and size is None: raise ValueError('Size must be specified if do_resize is True.' ) if do_center_crop and crop_size is None: raise ValueError('Crop size must be specified if do_center_crop is True.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.' ) # All transformations expect numpy arrays. 
__lowercase= [to_numpy_array(lowerCAmelCase ) for image in images] if do_resize: __lowercase= [self.resize(image=lowerCAmelCase , size=lowerCAmelCase , resample=lowerCAmelCase ) for image in images] if do_center_crop: __lowercase= [self.center_crop(image=lowerCAmelCase , size=lowerCAmelCase ) for image in images] if do_rescale: __lowercase= [self.rescale(image=lowerCAmelCase , scale=lowerCAmelCase ) for image in images] if do_normalize: __lowercase= [self.normalize(image=lowerCAmelCase , mean=lowerCAmelCase , std=lowerCAmelCase ) for image in images] __lowercase= [to_channel_dimension_format(lowerCAmelCase , lowerCAmelCase ) for image in images] __lowercase= {'pixel_values': images} return BatchFeature(data=lowerCAmelCase , tensor_type=lowerCAmelCase ) def _A (self , lowerCAmelCase , lowerCAmelCase = None ): __lowercase= outputs.logits # Resize logits and compute semantic segmentation maps if target_sizes is not None: if len(lowerCAmelCase ) != len(lowerCAmelCase ): raise ValueError( 'Make sure that you pass in as many target sizes as the batch dimension of the logits' ) if is_torch_tensor(lowerCAmelCase ): __lowercase= target_sizes.numpy() __lowercase= [] for idx in range(len(lowerCAmelCase ) ): __lowercase= torch.nn.functional.interpolate( logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='bilinear' , align_corners=lowerCAmelCase ) __lowercase= resized_logits[0].argmax(dim=0 ) semantic_segmentation.append(lowerCAmelCase ) else: __lowercase= logits.argmax(dim=1 ) __lowercase= [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )] return semantic_segmentation
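# A plain numpy/PIL restatement of the preprocessing pipeline above
# (shortest-edge resize -> center crop -> rescale -> normalize), to make the
# math explicit; the 256/224 sizes and the 0.5 mean/std mirror the class
# defaults (IMAGENET_STANDARD_MEAN/STD), everything else is illustrative.
import numpy as np
from PIL import Image


def preprocess_sketch(image: Image.Image) -> np.ndarray:
    image = image.convert("RGB")
    # Resize so the shortest edge is 256, preserving the aspect ratio.
    scale = 256 / min(image.size)
    image = image.resize((round(image.width * scale), round(image.height * scale)))
    # Center-crop to 224 x 224.
    left = (image.width - 224) // 2
    top = (image.height - 224) // 2
    image = image.crop((left, top, left + 224, top + 224))
    # Rescale to [0, 1], normalize, and move channels first (ChannelDimension.FIRST).
    pixels = np.asarray(image, dtype=np.float32) / 255.0
    pixels = (pixels - 0.5) / 0.5
    return pixels.transpose(2, 0, 1)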
import os import unittest from huggingface_hub.utils import are_progress_bars_disabled import transformers.models.bart.tokenization_bart from transformers import logging from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context from transformers.utils.logging import disable_progress_bar, enable_progress_bar class A ( unittest.TestCase ): def _A (self ): __lowercase= logging.get_logger() # the current default level is logging.WARNING __lowercase= logging.get_verbosity() logging.set_verbosity_error() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) logging.set_verbosity_warning() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) logging.set_verbosity_info() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) logging.set_verbosity_debug() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) # restore to the original level logging.set_verbosity(lowerCAmelCase ) def _A (self ): __lowercase= logging.get_verbosity() __lowercase= logging.get_logger('transformers.models.bart.tokenization_bart' ) __lowercase= 'Testing 1, 2, 3' # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`) if level_origin <= logging.WARNING: with CaptureLogger(lowerCAmelCase ) as cl: logger.warning(lowerCAmelCase ) self.assertEqual(cl.out , msg + '\n' ) # this is setting the level for all of `transformers.*` loggers logging.set_verbosity_error() # should not be able to log warnings with CaptureLogger(lowerCAmelCase ) as cl: logger.warning(lowerCAmelCase ) self.assertEqual(cl.out , '' ) # should be able to log warnings again logging.set_verbosity_warning() with CaptureLogger(lowerCAmelCase ) as cl: logger.warning(lowerCAmelCase ) self.assertEqual(cl.out , msg + '\n' ) # restore to the original level logging.set_verbosity(lowerCAmelCase ) @mockenv(TRANSFORMERS_VERBOSITY='error' ) def _A (self ): # reset for the env var to take effect, next time some logger call is made transformers.utils.logging._reset_library_root_logger() # this action activates the env var __lowercase= logging.get_logger('transformers.models.bart.tokenization_bart' ) __lowercase= os.getenv('TRANSFORMERS_VERBOSITY' , lowerCAmelCase ) __lowercase= logging.log_levels[env_level_str] __lowercase= logging.get_verbosity() self.assertEqual( lowerCAmelCase , lowerCAmelCase , f'TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}' , ) # restore to the original level __lowercase= '' transformers.utils.logging._reset_library_root_logger() @mockenv(TRANSFORMERS_VERBOSITY='super-error' ) def _A (self ): # reset for the env var to take effect, next time some logger call is made transformers.utils.logging._reset_library_root_logger() __lowercase= logging.logging.getLogger() with CaptureLogger(lowerCAmelCase ) as cl: # this action activates the env var logging.get_logger('transformers.models.bart.tokenization_bart' ) self.assertIn('Unknown option TRANSFORMERS_VERBOSITY=super-error' , cl.out ) # no need to restore as nothing was changed def _A (self ): # testing `logger.warning_advice()` transformers.utils.logging._reset_library_root_logger() __lowercase= logging.get_logger('transformers.models.bart.tokenization_bart' ) __lowercase= 'Testing 1, 2, 3' with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='1' ): # nothing should be logged as env var disables this method with CaptureLogger(lowerCAmelCase ) as cl: logger.warning_advice(lowerCAmelCase ) self.assertEqual(cl.out , '' ) with 
mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='' ): # should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset with CaptureLogger(lowerCAmelCase ) as cl: logger.warning_advice(lowerCAmelCase ) self.assertEqual(cl.out , msg + '\n' ) def _lowerCamelCase( ) -> Optional[int]: '''simple docstring''' disable_progress_bar() assert are_progress_bars_disabled() enable_progress_bar() assert not are_progress_bars_disabled()
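# A short usage sketch of the verbosity controls the tests above exercise;
# only `transformers` itself is assumed.
from transformers.utils import logging

logging.set_verbosity_error()  # silence warnings for all transformers.* loggers
logger = logging.get_logger("transformers")
logger.warning("this is suppressed at ERROR verbosity")

logging.set_verbosity_warning()  # back to the default level
logger.warning("this is shown again")

logging.disable_progress_bar()  # the progress-bar switch tested above
logging.enable_progress_bar()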
import collections from typing import List, Optional, Union from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging from ..bert.tokenization_bert_fast import BertTokenizerFast from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer lowerCAmelCase = logging.get_logger(__name__) lowerCAmelCase = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''} lowerCAmelCase = { '''vocab_file''': { '''facebook/dpr-ctx_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt''' ), '''facebook/dpr-ctx_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''facebook/dpr-ctx_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json''' ), '''facebook/dpr-ctx_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json''' ), }, } lowerCAmelCase = { '''vocab_file''': { '''facebook/dpr-question_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt''' ), '''facebook/dpr-question_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''facebook/dpr-question_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json''' ), '''facebook/dpr-question_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json''' ), }, } lowerCAmelCase = { '''vocab_file''': { '''facebook/dpr-reader-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt''' ), '''facebook/dpr-reader-multiset-base''': ( '''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''facebook/dpr-reader-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json''' ), '''facebook/dpr-reader-multiset-base''': ( '''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json''' ), }, } lowerCAmelCase = { '''facebook/dpr-ctx_encoder-single-nq-base''': 5_1_2, '''facebook/dpr-ctx_encoder-multiset-base''': 5_1_2, } lowerCAmelCase = { '''facebook/dpr-question_encoder-single-nq-base''': 5_1_2, '''facebook/dpr-question_encoder-multiset-base''': 5_1_2, } lowerCAmelCase = { '''facebook/dpr-reader-single-nq-base''': 5_1_2, '''facebook/dpr-reader-multiset-base''': 5_1_2, } lowerCAmelCase = { '''facebook/dpr-ctx_encoder-single-nq-base''': {'''do_lower_case''': True}, '''facebook/dpr-ctx_encoder-multiset-base''': {'''do_lower_case''': True}, } lowerCAmelCase = { '''facebook/dpr-question_encoder-single-nq-base''': {'''do_lower_case''': True}, '''facebook/dpr-question_encoder-multiset-base''': {'''do_lower_case''': True}, } lowerCAmelCase = { '''facebook/dpr-reader-single-nq-base''': {'''do_lower_case''': True}, '''facebook/dpr-reader-multiset-base''': {'''do_lower_case''': True}, } class A ( A_ ): UpperCamelCase_ : List[Any] =VOCAB_FILES_NAMES UpperCamelCase_ : Dict =CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP UpperCamelCase_ : List[Any] 
=CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase_ : Optional[int] =CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION UpperCamelCase_ : int =DPRContextEncoderTokenizer class A ( A_ ): UpperCamelCase_ : Any =VOCAB_FILES_NAMES UpperCamelCase_ : List[str] =QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP UpperCamelCase_ : Optional[Any] =QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase_ : Optional[Any] =QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION UpperCamelCase_ : List[Any] =DPRQuestionEncoderTokenizer lowerCAmelCase = collections.namedtuple( '''DPRSpanPrediction''', ['''span_score''', '''relevance_score''', '''doc_id''', '''start_index''', '''end_index''', '''text'''] ) lowerCAmelCase = collections.namedtuple('''DPRReaderOutput''', ['''start_logits''', '''end_logits''', '''relevance_logits''']) lowerCAmelCase = R''' Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`. It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers), using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)` with the format: [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids> Args: questions (`str` or `List[str]`): The questions to be encoded. You can specify one question for many passages. In this case, the question will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in `titles` or `texts`. titles (`str` or `List[str]`): The passages titles to be encoded. This can be a string or a list of strings if there are several passages. texts (`str` or `List[str]`): The passages texts to be encoded. This can be a string or a list of strings if there are several passages. padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`): Activates and controls padding. Accepts the following values: - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different lengths). truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`): Activates and controls truncation. Accepts the following values: - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided. - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided. 
- `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size). max_length (`int`, *optional*): Controls the maximum length to use by one of the truncation/padding parameters. If left unset or set to `None`, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated. return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors instead of list of python integers. Acceptable values are: - `\'tf\'`: Return TensorFlow `tf.constant` objects. - `\'pt\'`: Return PyTorch `torch.Tensor` objects. - `\'np\'`: Return Numpy `np.ndarray` objects. return_attention_mask (`bool`, *optional*): Whether or not to return the attention mask. If not set, will return the attention mask according to the specific tokenizer\'s default, defined by the `return_outputs` attribute. [What are attention masks?](../glossary#attention-mask) Return: `Dict[str, List[List[int]]]`: A dictionary with the following keys: - `input_ids`: List of token ids to be fed to a model. - `attention_mask`: List of indices specifying which tokens should be attended to by the model. ''' @add_start_docstrings(A_ ) class A : def __call__(self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = False , lowerCAmelCase = False , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , **lowerCAmelCase , ): if titles is None and texts is None: return super().__call__( lowerCAmelCase , padding=lowerCAmelCase , truncation=lowerCAmelCase , max_length=lowerCAmelCase , return_tensors=lowerCAmelCase , return_attention_mask=lowerCAmelCase , **lowerCAmelCase , ) elif titles is None or texts is None: __lowercase= titles if texts is None else texts return super().__call__( lowerCAmelCase , lowerCAmelCase , padding=lowerCAmelCase , truncation=lowerCAmelCase , max_length=lowerCAmelCase , return_tensors=lowerCAmelCase , return_attention_mask=lowerCAmelCase , **lowerCAmelCase , ) __lowercase= titles if not isinstance(lowerCAmelCase , lowerCAmelCase ) else [titles] __lowercase= texts if not isinstance(lowerCAmelCase , lowerCAmelCase ) else [texts] __lowercase= len(lowerCAmelCase ) __lowercase= questions if not isinstance(lowerCAmelCase , lowerCAmelCase ) else [questions] * n_passages assert len(lowerCAmelCase ) == len( lowerCAmelCase ), f'There should be as many titles than texts but got {len(lowerCAmelCase )} titles and {len(lowerCAmelCase )} texts.' 
__lowercase= super().__call__(lowerCAmelCase , lowerCAmelCase , padding=lowerCAmelCase , truncation=lowerCAmelCase )['input_ids'] __lowercase= super().__call__(lowerCAmelCase , add_special_tokens=lowerCAmelCase , padding=lowerCAmelCase , truncation=lowerCAmelCase )['input_ids'] __lowercase= { 'input_ids': [ (encoded_question_and_title + encoded_text)[:max_length] if max_length is not None and truncation else encoded_question_and_title + encoded_text for encoded_question_and_title, encoded_text in zip(lowerCAmelCase , lowerCAmelCase ) ] } if return_attention_mask is not False: __lowercase= [] for input_ids in encoded_inputs["input_ids"]: attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] ) __lowercase= attention_mask return self.pad(lowerCAmelCase , padding=lowerCAmelCase , max_length=lowerCAmelCase , return_tensors=lowerCAmelCase ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = 1_6 , lowerCAmelCase = 6_4 , lowerCAmelCase = 4 , ): __lowercase= reader_input['input_ids'] __lowercase, __lowercase, __lowercase= reader_output[:3] __lowercase= len(lowerCAmelCase ) __lowercase= sorted(range(lowerCAmelCase ) , reverse=lowerCAmelCase , key=relevance_logits.__getitem__ ) __lowercase= [] for doc_id in sorted_docs: __lowercase= list(input_ids[doc_id] ) # assuming question & title information is at the beginning of the sequence __lowercase= sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id if sequence_ids[-1] == self.pad_token_id: __lowercase= sequence_ids.index(self.pad_token_id ) else: __lowercase= len(lowerCAmelCase ) __lowercase= self._get_best_spans( start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=lowerCAmelCase , top_spans=lowerCAmelCase , ) for start_index, end_index in best_spans: start_index += passage_offset end_index += passage_offset nbest_spans_predictions.append( DPRSpanPrediction( span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=lowerCAmelCase , start_index=lowerCAmelCase , end_index=lowerCAmelCase , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) ) if len(lowerCAmelCase ) >= num_spans: break return nbest_spans_predictions[:num_spans] def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ): __lowercase= [] for start_index, start_score in enumerate(lowerCAmelCase ): for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ): scores.append(((start_index, start_index + answer_length), start_score + end_score) ) __lowercase= sorted(lowerCAmelCase , key=lambda lowerCAmelCase : x[1] , reverse=lowerCAmelCase ) __lowercase= [] for (start_index, end_index), score in scores: assert start_index <= end_index, f'Wrong span indices: [{start_index}:{end_index}]' __lowercase= end_index - start_index + 1 assert length <= max_answer_length, f'Span is too long: {length} > {max_answer_length}' if any( start_index <= prev_start_index <= prev_end_index <= end_index or prev_start_index <= start_index <= end_index <= prev_end_index for (prev_start_index, prev_end_index) in chosen_span_intervals ): continue chosen_span_intervals.append((start_index, end_index) ) if len(lowerCAmelCase ) == top_spans: break return chosen_span_intervals @add_end_docstrings(A_ ) class A ( A_ , A_ ): UpperCamelCase_ : Optional[int] =VOCAB_FILES_NAMES UpperCamelCase_ : List[str] 
=READER_PRETRAINED_VOCAB_FILES_MAP UpperCamelCase_ : Dict =READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase_ : Optional[Any] =READER_PRETRAINED_INIT_CONFIGURATION UpperCamelCase_ : Union[str, Any] =['''input_ids''', '''attention_mask'''] UpperCamelCase_ : Dict =DPRReaderTokenizer
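# A minimal usage sketch for the reader tokenizer defined above; the question,
# titles and texts are made up, and the checkpoint name comes from the
# pretrained maps at the top of the file.
from transformers import DPRReaderTokenizerFast

tokenizer = DPRReaderTokenizerFast.from_pretrained("facebook/dpr-reader-single-nq-base")
encoded = tokenizer(
    questions="What is the capital of France?",
    titles=["Paris", "France"],
    texts=["Paris is the capital of France.", "France is a country in Europe."],
    padding=True,
    return_tensors="pt",
)
# One row per passage, laid out as [CLS] question [SEP] title [SEP] text.
print(encoded["input_ids"].shape)  # (n_passages, sequence_length)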
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging lowerCAmelCase = '''▁''' lowerCAmelCase = {'''vocab_file''': '''spiece.model'''} lowerCAmelCase = { '''vocab_file''': {'''google/pegasus-xsum''': '''https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'''} } lowerCAmelCase = { '''google/pegasus-xsum''': 5_1_2, } lowerCAmelCase = logging.get_logger(__name__) class A ( A_ ): UpperCamelCase_ : Union[str, Any] =VOCAB_FILES_NAMES UpperCamelCase_ : List[Any] =VOCAB_FILES_NAMES UpperCamelCase_ : int =PRETRAINED_VOCAB_FILES_MAP UpperCamelCase_ : Tuple =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase_ : int =['''input_ids''', '''attention_mask'''] def __init__(self , lowerCAmelCase , lowerCAmelCase="<pad>" , lowerCAmelCase="</s>" , lowerCAmelCase="<unk>" , lowerCAmelCase="<mask_2>" , lowerCAmelCase="<mask_1>" , lowerCAmelCase=None , lowerCAmelCase=1_0_3 , lowerCAmelCase = None , **lowerCAmelCase , ): __lowercase= offset if additional_special_tokens is not None: if not isinstance(lowerCAmelCase , lowerCAmelCase ): raise TypeError( f'additional_special_tokens should be of type {type(lowerCAmelCase )}, but is' f' {type(lowerCAmelCase )}' ) __lowercase= ( ([mask_token_sent] + additional_special_tokens) if mask_token_sent not in additional_special_tokens and mask_token_sent is not None else additional_special_tokens ) # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken additional_special_tokens_extended += [ f'<unk_{i}>' for i in range(len(lowerCAmelCase ) , self.offset - 1 ) ] if len(set(lowerCAmelCase ) ) != len(lowerCAmelCase ): raise ValueError( 'Please make sure that the provided additional_special_tokens do not contain an incorrectly' f' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.' 
) __lowercase= additional_special_tokens_extended else: __lowercase= [mask_token_sent] if mask_token_sent is not None else [] additional_special_tokens += [f'<unk_{i}>' for i in range(2 , self.offset )] __lowercase= {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( eos_token=lowerCAmelCase , unk_token=lowerCAmelCase , mask_token=lowerCAmelCase , pad_token=lowerCAmelCase , mask_token_sent=lowerCAmelCase , offset=lowerCAmelCase , additional_special_tokens=lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **lowerCAmelCase , ) __lowercase= mask_token_sent __lowercase= vocab_file __lowercase= spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(lowerCAmelCase ) # add special tokens to encoder dict __lowercase= { 0: self.pad_token, 1: self.eos_token, } if self.mask_token_sent is not None: self.encoder.update( { 2: self.mask_token_sent, 3: self.mask_token, } ) if self.offset > 0: # entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102 # mask_token_sent is already added to list -> so start at 1 self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1 , self.offset - 1 )} ) __lowercase= {v: k for k, v in self.encoder.items()} @property def _A (self ): return len(self.sp_model ) + self.offset def _A (self ): __lowercase= {self.convert_ids_to_tokens(lowerCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__(self ): __lowercase= self.__dict__.copy() __lowercase= None return state def __setstate__(self , lowerCAmelCase ): __lowercase= d # for backward compatibility if not hasattr(self , 'sp_model_kwargs' ): __lowercase= {} __lowercase= spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def _A (self , lowerCAmelCase ): return self.sp_model.encode(lowerCAmelCase , out_type=lowerCAmelCase ) def _A (self , lowerCAmelCase ): if token in self.decoder: return self.decoder[token] elif token in self.added_tokens_decoder: return self.added_tokens_decoder[token] __lowercase= self.sp_model.piece_to_id(lowerCAmelCase ) return sp_id + self.offset def _A (self , lowerCAmelCase ): if index in self.encoder: return self.encoder[index] elif index in self.added_tokens_encoder: return self.added_tokens_encoder[index] else: __lowercase= self.sp_model.IdToPiece(index - self.offset ) return token def _A (self , lowerCAmelCase ): __lowercase= [] __lowercase= '' for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(lowerCAmelCase ) + token __lowercase= [] else: current_sub_tokens.append(lowerCAmelCase ) out_string += self.sp_model.decode(lowerCAmelCase ) return out_string.strip() def _A (self , lowerCAmelCase=False ): return 1 def _A (self , lowerCAmelCase ): __lowercase= set(self.all_special_ids ) # call it once instead of inside list comp all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special return [1 if x in all_special_ids else 0 for x in seq] def _A (self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = False ): if already_has_special_tokens: return self._special_token_mask(lowerCAmelCase ) elif token_ids_a is None: return self._special_token_mask(lowerCAmelCase ) + [1] else: return self._special_token_mask(token_ids_a + token_ids_a ) + [1] def _A (self , lowerCAmelCase , lowerCAmelCase=None ): if token_ids_a is None: return token_ids_a + [self.eos_token_id] # We don't expect to 
process pairs, but leave the pair logic for API consistency return token_ids_a + token_ids_a + [self.eos_token_id] def _A (self , lowerCAmelCase , lowerCAmelCase = None ): if not os.path.isdir(lowerCAmelCase ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return __lowercase= os.path.join( lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , lowerCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(lowerCAmelCase , 'wb' ) as fi: __lowercase= self.sp_model.serialized_model_proto() fi.write(lowerCAmelCase ) return (out_vocab_file,)
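# A minimal usage sketch for the tokenizer above; it requires the
# `sentencepiece` backend and downloads the google/pegasus-xsum checkpoint.
from transformers import PegasusTokenizer

tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")
batch = tokenizer("PEGASUS was pretrained with gap-sentence generation.", return_tensors="pt")
# build_inputs_with_special_tokens appends only </s>; there is no BOS token.
assert batch["input_ids"][0, -1].item() == tokenizer.eos_token_id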
from __future__ import annotations


def fractional_knapsack(
    value: list[int], weight: list[int], capacity: int
) -> tuple[float, list[float]]:
    """Greedy fractional knapsack: fill the sack in decreasing value/weight order."""
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions


if __name__ == "__main__":
    import doctest

    doctest.testmod()
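# A quick worked example for the greedy routine above: items are taken in
# decreasing value/weight ratio (6, 5, 4), so the knapsack holds all of items
# 0 and 1 and two thirds of item 2, for a total value of 240.
best, taken = fractional_knapsack([60, 100, 120], [10, 20, 30], 50)
assert best == 240.0
assert taken == [1, 1, 2 / 3]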
import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST, OpenAIGPTConfig, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification, OpenAIGPTLMHeadModel, OpenAIGPTModel, ) class A : def __init__(self , lowerCAmelCase , lowerCAmelCase=1_3 , lowerCAmelCase=7 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=9_9 , lowerCAmelCase=3_2 , lowerCAmelCase=5 , lowerCAmelCase=4 , lowerCAmelCase=3_7 , lowerCAmelCase="gelu" , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=5_1_2 , lowerCAmelCase=1_6 , lowerCAmelCase=2 , lowerCAmelCase=0.02 , lowerCAmelCase=3 , lowerCAmelCase=4 , lowerCAmelCase=None , ): __lowercase= parent __lowercase= batch_size __lowercase= seq_length __lowercase= is_training __lowercase= use_token_type_ids __lowercase= use_labels __lowercase= vocab_size __lowercase= hidden_size __lowercase= num_hidden_layers __lowercase= num_attention_heads __lowercase= intermediate_size __lowercase= hidden_act __lowercase= hidden_dropout_prob __lowercase= attention_probs_dropout_prob __lowercase= max_position_embeddings __lowercase= type_vocab_size __lowercase= type_sequence_label_size __lowercase= initializer_range __lowercase= num_labels __lowercase= num_choices __lowercase= scope __lowercase= self.vocab_size - 1 def _A (self ): __lowercase= ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowercase= None if self.use_token_type_ids: __lowercase= ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __lowercase= None __lowercase= None __lowercase= None if self.use_labels: __lowercase= ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowercase= ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __lowercase= ids_tensor([self.batch_size] , self.num_choices ) __lowercase= OpenAIGPTConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , ) __lowercase= ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 ) return ( config, input_ids, head_mask, token_type_ids, sequence_labels, token_labels, choice_labels, ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , *lowerCAmelCase ): __lowercase= OpenAIGPTModel(config=lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= model(lowerCAmelCase , token_type_ids=lowerCAmelCase , head_mask=lowerCAmelCase ) __lowercase= model(lowerCAmelCase , token_type_ids=lowerCAmelCase ) __lowercase= model(lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , *lowerCAmelCase ): __lowercase= OpenAIGPTLMHeadModel(lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= model(lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) 
) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , *lowerCAmelCase ): __lowercase= OpenAIGPTDoubleHeadsModel(lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= model(lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , *lowerCAmelCase ): __lowercase= self.num_labels __lowercase= OpenAIGPTForSequenceClassification(lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowercase= model(lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _A (self ): __lowercase= self.prepare_config_and_inputs() ( ( __lowercase ), ( __lowercase ), ( __lowercase ), ( __lowercase ), ( __lowercase ), ( __lowercase ), ( __lowercase ), )= config_and_inputs __lowercase= { 'input_ids': input_ids, 'token_type_ids': token_type_ids, 'head_mask': head_mask, } return config, inputs_dict @require_torch class A ( A_ , A_ , A_ , unittest.TestCase ): UpperCamelCase_ : Optional[Any] =( (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification) if is_torch_available() else () ) UpperCamelCase_ : Tuple =( (OpenAIGPTLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly UpperCamelCase_ : List[str] =( { '''feature-extraction''': OpenAIGPTModel, '''text-classification''': OpenAIGPTForSequenceClassification, '''text-generation''': OpenAIGPTLMHeadModel, '''zero-shot''': OpenAIGPTForSequenceClassification, } if is_torch_available() else {} ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests": # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers. # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a # tiny config could not be created. 
return True return False def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=False ): __lowercase= super()._prepare_for_class(lowerCAmelCase , lowerCAmelCase , return_labels=lowerCAmelCase ) if return_labels: if model_class.__name__ == "OpenAIGPTDoubleHeadsModel": __lowercase= torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=lowerCAmelCase , ) __lowercase= inputs_dict['labels'] __lowercase= inputs_dict['labels'] __lowercase= torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=lowerCAmelCase , ) __lowercase= torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase ) return inputs_dict def _A (self ): __lowercase= OpenAIGPTModelTester(self ) __lowercase= ConfigTester(self , config_class=lowerCAmelCase , n_embd=3_7 ) def _A (self ): self.config_tester.run_common_tests() def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_model(*lowerCAmelCase ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*lowerCAmelCase ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_double_lm_head_model(*lowerCAmelCase ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*lowerCAmelCase ) @slow def _A (self ): for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowercase= OpenAIGPTModel.from_pretrained(lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) @require_torch class A ( unittest.TestCase ): @slow def _A (self ): __lowercase= OpenAIGPTLMHeadModel.from_pretrained('openai-gpt' ) model.to(lowerCAmelCase ) __lowercase= torch.tensor([[4_8_1, 4_7_3_5, 5_4_4]] , dtype=torch.long , device=lowerCAmelCase ) # the president is __lowercase= [ 4_8_1, 4_7_3_5, 5_4_4, 2_4_6, 9_6_3, 8_7_0, 7_6_2, 2_3_9, 2_4_4, 4_0_4_7_7, 2_4_4, 2_4_9, 7_1_9, 8_8_1, 4_8_7, 5_4_4, 2_4_0, 2_4_4, 6_0_3, 4_8_1, ] # the president is a very good man. " \n " i\'m sure he is, " said the __lowercase= model.generate(lowerCAmelCase , do_sample=lowerCAmelCase ) self.assertListEqual(output_ids[0].tolist() , lowerCAmelCase )
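# A minimal generation sketch matching the integration test above; it
# downloads the `openai-gpt` checkpoint and its tokenizer (the tokenizer is
# not part of this test file).
from transformers import OpenAIGPTLMHeadModel, OpenAIGPTTokenizer

tokenizer = OpenAIGPTTokenizer.from_pretrained("openai-gpt")
model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
input_ids = tokenizer("the president is", return_tensors="pt").input_ids
output_ids = model.generate(input_ids, do_sample=False)  # greedy decoding
print(tokenizer.decode(output_ids[0]))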
from sklearn.metrics import matthews_corrcoef import datasets lowerCAmelCase = ''' Compute the Matthews correlation coefficient (MCC) The Matthews correlation coefficient is used in machine learning as a measure of the quality of binary and multiclass classifications. It takes into account true and false positives and negatives and is generally regarded as a balanced measure which can be used even if the classes are of very different sizes. The MCC is in essence a correlation coefficient value between -1 and +1. A coefficient of +1 represents a perfect prediction, 0 an average random prediction and -1 an inverse prediction. The statistic is also known as the phi coefficient. [source: Wikipedia] ''' lowerCAmelCase = ''' Args: predictions (list of int): Predicted labels, as returned by a model. references (list of int): Ground truth labels. sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`. Returns: matthews_correlation (dict containing float): Matthews correlation. Examples: Example 1, a basic example with only predictions and references as inputs: >>> matthews_metric = datasets.load_metric("matthews_correlation") >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2], ... predictions=[1, 2, 2, 0, 3, 3]) >>> print(round(results[\'matthews_correlation\'], 2)) 0.54 Example 2, the same example as above, but also including sample weights: >>> matthews_metric = datasets.load_metric("matthews_correlation") >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2], ... predictions=[1, 2, 2, 0, 3, 3], ... sample_weight=[0.5, 3, 1, 1, 1, 2]) >>> print(round(results[\'matthews_correlation\'], 2)) 0.1 Example 3, the same example as above, but with sample weights that cause a negative correlation: >>> matthews_metric = datasets.load_metric("matthews_correlation") >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2], ... predictions=[1, 2, 2, 0, 3, 3], ... sample_weight=[0.5, 1, 0, 0, 0, 1]) >>> print(round(results[\'matthews_correlation\'], 2)) -0.25 ''' lowerCAmelCase = '''\ @article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011} } ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class A ( datasets.Metric ): def _A (self ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Value('int32' ), 'references': datasets.Value('int32' ), } ) , reference_urls=[ 'https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html' ] , ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=None ): return { "matthews_correlation": float(matthews_corrcoef(lowerCAmelCase , lowerCAmelCase , sample_weight=lowerCAmelCase ) ), }
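# The binary-class formula behind the metric above, written out directly so
# the sklearn result can be sanity-checked against it; multiclass MCC uses a
# generalization of this expression.
from math import sqrt


def binary_mcc(references, predictions):
    tp = sum(r == 1 and p == 1 for r, p in zip(references, predictions))
    tn = sum(r == 0 and p == 0 for r, p in zip(references, predictions))
    fp = sum(r == 0 and p == 1 for r, p in zip(references, predictions))
    fn = sum(r == 1 and p == 0 for r, p in zip(references, predictions))
    denominator = sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
    return (tp * tn - fp * fn) / denominator if denominator else 0.0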
from math import isqrt


def is_prime(number: int) -> bool:
    """Trial division up to the integer square root."""
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))


def solution(max_prime: int = 10**6) -> int:
    """Count primes below max_prime that are differences of consecutive cubes."""
    primes_count = 0
    cube_index = 1
    prime_candidate = 7
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)
        cube_index += 1
        prime_candidate += 6 * cube_index
    return primes_count


if __name__ == "__main__":
    print(f"{solution() = }")
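# Sanity check: the candidates walked by solution() are the differences of
# consecutive cubes, (k + 1)**3 - k**3 = 3*k**2 + 3*k + 1, i.e. 7, 19, 37,
# 61, 91, ...; the first four are prime and 91 = 7 * 13 is the first
# composite.
assert [is_prime(n) for n in (7, 19, 37, 61, 91)] == [True, True, True, True, False]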
import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNetaDConditionModel from diffusers.pipelines.alt_diffusion.modeling_roberta_series import ( RobertaSeriesConfig, RobertaSeriesModelWithTransformation, ) from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class A ( A_ , A_ , A_ , unittest.TestCase ): UpperCamelCase_ : str =AltDiffusionPipeline UpperCamelCase_ : List[str] =TEXT_TO_IMAGE_PARAMS UpperCamelCase_ : Any =TEXT_TO_IMAGE_BATCH_PARAMS UpperCamelCase_ : List[str] =TEXT_TO_IMAGE_IMAGE_PARAMS UpperCamelCase_ : Optional[int] =TEXT_TO_IMAGE_IMAGE_PARAMS def _A (self ): torch.manual_seed(0 ) __lowercase= UNetaDConditionModel( block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=3_2 , ) __lowercase= DDIMScheduler( beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' , clip_sample=lowerCAmelCase , set_alpha_to_one=lowerCAmelCase , ) torch.manual_seed(0 ) __lowercase= AutoencoderKL( block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , ) # TODO: address the non-deterministic text encoder (fails for save-load tests) # torch.manual_seed(0) # text_encoder_config = RobertaSeriesConfig( # hidden_size=32, # project_dim=32, # intermediate_size=37, # layer_norm_eps=1e-05, # num_attention_heads=4, # num_hidden_layers=5, # vocab_size=5002, # ) # text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config) torch.manual_seed(0 ) __lowercase= CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , projection_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5_0_0_2 , ) __lowercase= CLIPTextModel(lowerCAmelCase ) __lowercase= XLMRobertaTokenizer.from_pretrained('hf-internal-testing/tiny-xlm-roberta' ) __lowercase= 7_7 __lowercase= { 'unet': unet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'safety_checker': None, 'feature_extractor': None, } return components def _A (self , lowerCAmelCase , lowerCAmelCase=0 ): if str(lowerCAmelCase ).startswith('mps' ): __lowercase= torch.manual_seed(lowerCAmelCase ) else: __lowercase= torch.Generator(device=lowerCAmelCase ).manual_seed(lowerCAmelCase ) __lowercase= { 'prompt': 'A painting of a squirrel eating a burger', 'generator': generator, 'num_inference_steps': 2, 'guidance_scale': 6.0, 'output_type': 'numpy', } return inputs def _A (self ): super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 ) def _A (self ): super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) def _A (self ): __lowercase= 'cpu' # ensure determinism for the device-dependent torch.Generator __lowercase= self.get_dummy_components() torch.manual_seed(0 ) __lowercase= 
RobertaSeriesConfig( hidden_size=3_2 , project_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5_0_0_2 , ) # TODO: remove after fixing the non-deterministic text encoder __lowercase= RobertaSeriesModelWithTransformation(lowerCAmelCase ) __lowercase= text_encoder __lowercase= AltDiffusionPipeline(**lowerCAmelCase ) __lowercase= alt_pipe.to(lowerCAmelCase ) alt_pipe.set_progress_bar_config(disable=lowerCAmelCase ) __lowercase= self.get_dummy_inputs(lowerCAmelCase ) __lowercase= 'A photo of an astronaut' __lowercase= alt_pipe(**lowerCAmelCase ) __lowercase= output.images __lowercase= image[0, -3:, -3:, -1] assert image.shape == (1, 6_4, 6_4, 3) __lowercase= np.array( [0.5_74_81_62, 0.60_44_71_45, 0.48_82_12_17, 0.50_10_06_36, 0.5_43_11_85, 0.45_76_36_83, 0.49_65_76_96, 0.48_13_27_33, 0.47_57_30_93] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def _A (self ): __lowercase= 'cpu' # ensure determinism for the device-dependent torch.Generator __lowercase= self.get_dummy_components() __lowercase= PNDMScheduler(skip_prk_steps=lowerCAmelCase ) torch.manual_seed(0 ) __lowercase= RobertaSeriesConfig( hidden_size=3_2 , project_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5_0_0_2 , ) # TODO: remove after fixing the non-deterministic text encoder __lowercase= RobertaSeriesModelWithTransformation(lowerCAmelCase ) __lowercase= text_encoder __lowercase= AltDiffusionPipeline(**lowerCAmelCase ) __lowercase= alt_pipe.to(lowerCAmelCase ) alt_pipe.set_progress_bar_config(disable=lowerCAmelCase ) __lowercase= self.get_dummy_inputs(lowerCAmelCase ) __lowercase= alt_pipe(**lowerCAmelCase ) __lowercase= output.images __lowercase= image[0, -3:, -3:, -1] assert image.shape == (1, 6_4, 6_4, 3) __lowercase= np.array( [0.51_60_50_93, 0.5_70_72_41, 0.47_36_55_07, 0.50_57_88_86, 0.5_63_38_77, 0.4_64_25_03, 0.5_18_20_81, 0.48_76_34_84, 0.49_08_42_37] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 @slow @require_torch_gpu class A ( unittest.TestCase ): def _A (self ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def _A (self ): # make sure here that pndm scheduler skips prk __lowercase= AltDiffusionPipeline.from_pretrained('BAAI/AltDiffusion' , safety_checker=lowerCAmelCase ) __lowercase= alt_pipe.to(lowerCAmelCase ) alt_pipe.set_progress_bar_config(disable=lowerCAmelCase ) __lowercase= 'A painting of a squirrel eating a burger' __lowercase= torch.manual_seed(0 ) __lowercase= alt_pipe([prompt] , generator=lowerCAmelCase , guidance_scale=6.0 , num_inference_steps=2_0 , output_type='np' ) __lowercase= output.images __lowercase= image[0, -3:, -3:, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) __lowercase= np.array([0.10_10, 0.08_00, 0.07_94, 0.08_85, 0.08_43, 0.07_62, 0.07_69, 0.07_29, 0.05_86] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def _A (self ): __lowercase= DDIMScheduler.from_pretrained('BAAI/AltDiffusion' , subfolder='scheduler' ) __lowercase= AltDiffusionPipeline.from_pretrained('BAAI/AltDiffusion' , scheduler=lowerCAmelCase , safety_checker=lowerCAmelCase ) __lowercase= alt_pipe.to(lowerCAmelCase ) alt_pipe.set_progress_bar_config(disable=lowerCAmelCase ) __lowercase= 'A painting of a squirrel eating a burger' __lowercase= torch.manual_seed(0 ) __lowercase= alt_pipe([prompt] , generator=lowerCAmelCase , num_inference_steps=2 , output_type='numpy' ) __lowercase= 
output.images __lowercase= image[0, -3:, -3:, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) __lowercase= np.array([0.40_19, 0.40_52, 0.38_10, 0.41_19, 0.39_16, 0.39_82, 0.46_51, 0.41_95, 0.53_23] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
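# A minimal inference sketch mirroring the slow tests above; it downloads the
# BAAI/AltDiffusion checkpoint, so it needs network access and ideally a GPU.
import torch
from diffusers import AltDiffusionPipeline

pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", safety_checker=None)
pipe = pipe.to("cuda" if torch.cuda.is_available() else "cpu")
generator = torch.manual_seed(0)
image = pipe(
    "A painting of a squirrel eating a burger",
    generator=generator,
    num_inference_steps=20,
).images[0]
image.save("squirrel.png")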
from __future__ import annotations


def prime_factors(n: int) -> list[int]:
    """Return the prime factorization of n by trial division."""
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors


if __name__ == "__main__":
    import doctest

    doctest.testmod()
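# Quick check: 360 = 2**3 * 3**2 * 5, and the factors come back in
# non-decreasing order because trial division proceeds upward.
assert prime_factors(360) == [2, 2, 2, 3, 3, 5]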
from __future__ import annotations

import sys
from collections import deque
from typing import Generic, TypeVar

T = TypeVar("T")


class LRUCache(Generic[T]):
    dq_store: deque[T]  # Cache store of keys
    key_reference: set[T]  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache

    def __init__(self, n: int) -> None:
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError("n should be an integer greater than 0.")
        else:
            LRUCache._MAX_CAPACITY = n

    def refer(self, x: T) -> None:
        if x not in self.key_reference:
            if len(self.dq_store) == LRUCache._MAX_CAPACITY:
                # Evict the least recently used key from the right end.
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element)
        else:
            self.dq_store.remove(x)
        self.dq_store.appendleft(x)
        self.key_reference.add(x)

    def display(self) -> None:
        for k in self.dq_store:
            print(k)

    def __repr__(self) -> str:
        return f"LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store)}"


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    lru_cache = LRUCache(4)
    lru_cache.refer("A")
    lru_cache.refer(2)
    lru_cache.refer(3)
    lru_cache.refer("A")
    lru_cache.refer(4)
    lru_cache.refer(5)
    lru_cache.display()

    print(lru_cache)
    assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
295
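A short trace of the eviction behavior, assuming the LRUCache class above is in scope:

cache = LRUCache(2)
cache.refer("A")
cache.refer("B")
cache.refer("C")  # capacity is 2, so "A" (least recently used) is evicted
print(cache)      # LRUCache(2) => ['C', 'B']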
import os import re import warnings from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_ta import TaTokenizer else: lowerCAmelCase = None lowerCAmelCase = logging.get_logger(__name__) lowerCAmelCase = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''} lowerCAmelCase = { '''vocab_file''': { '''t5-small''': '''https://huggingface.co/t5-small/resolve/main/spiece.model''', '''t5-base''': '''https://huggingface.co/t5-base/resolve/main/spiece.model''', '''t5-large''': '''https://huggingface.co/t5-large/resolve/main/spiece.model''', '''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/spiece.model''', '''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/spiece.model''', }, '''tokenizer_file''': { '''t5-small''': '''https://huggingface.co/t5-small/resolve/main/tokenizer.json''', '''t5-base''': '''https://huggingface.co/t5-base/resolve/main/tokenizer.json''', '''t5-large''': '''https://huggingface.co/t5-large/resolve/main/tokenizer.json''', '''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/tokenizer.json''', '''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/tokenizer.json''', }, } # TODO(PVP) - this should be removed in Transformers v5 lowerCAmelCase = { '''t5-small''': 5_1_2, '''t5-base''': 5_1_2, '''t5-large''': 5_1_2, '''t5-3b''': 5_1_2, '''t5-11b''': 5_1_2, } class A ( A_ ): UpperCamelCase_ : Dict =VOCAB_FILES_NAMES UpperCamelCase_ : Dict =PRETRAINED_VOCAB_FILES_MAP UpperCamelCase_ : List[Any] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase_ : str =['''input_ids''', '''attention_mask'''] UpperCamelCase_ : List[str] =TaTokenizer UpperCamelCase_ : List[int] =[] def __init__(self , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase="</s>" , lowerCAmelCase="<unk>" , lowerCAmelCase="<pad>" , lowerCAmelCase=1_0_0 , lowerCAmelCase=None , **lowerCAmelCase , ): # Add extra_ids to the special token list if extra_ids > 0 and additional_special_tokens is None: __lowercase= [f'<extra_id_{i}>' for i in range(lowerCAmelCase )] elif extra_ids > 0 and additional_special_tokens is not None: # Check that we have the right number of extra special tokens __lowercase= len(set(filter(lambda lowerCAmelCase : bool('extra_id_' in str(lowerCAmelCase ) ) , lowerCAmelCase ) ) ) if extra_tokens != extra_ids: raise ValueError( f'Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are' ' provided to T5Tokenizer. 
In this case the additional_special_tokens must include the extra_ids' ' tokens' ) super().__init__( lowerCAmelCase , tokenizer_file=lowerCAmelCase , eos_token=lowerCAmelCase , unk_token=lowerCAmelCase , pad_token=lowerCAmelCase , extra_ids=lowerCAmelCase , additional_special_tokens=lowerCAmelCase , **lowerCAmelCase , ) __lowercase= vocab_file __lowercase= False if not self.vocab_file else True __lowercase= extra_ids @staticmethod def _A (lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes: __lowercase= TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path] if init_max_model_length is not None and init_max_model_length != max_model_length: return init_max_model_length elif init_max_model_length is None: warnings.warn( 'This tokenizer was incorrectly instantiated with a model max length of' f' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this' ' behavior is kept to avoid breaking backwards compatibility when padding/encoding with' ' `truncation is True`.\n- Be aware that you SHOULD NOT rely on' f' {pretrained_model_name_or_path} automatically truncating your input to' f' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences' f' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with' ' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please' ' instantiate this tokenizer with `model_max_length` set to your preferred value.' , lowerCAmelCase , ) return max_model_length def _A (self , lowerCAmelCase , lowerCAmelCase = None ): if not self.can_save_slow_tokenizer: raise ValueError( 'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ' 'tokenizer.' ) if not os.path.isdir(lowerCAmelCase ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return __lowercase= os.path.join( lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase ): copyfile(self.vocab_file , lowerCAmelCase ) logger.info(f'Copy vocab file to {out_vocab_file}' ) return (out_vocab_file,) def _A (self , lowerCAmelCase , lowerCAmelCase = None ): __lowercase= token_ids_a + [self.eos_token_id] if token_ids_a is None: return self.prefix_tokens + token_ids_a else: __lowercase= token_ids_a + [self.eos_token_id] return self.prefix_tokens + token_ids_a + token_ids_a def _A (self , lowerCAmelCase , lowerCAmelCase = None ): __lowercase= [self.eos_token_id] if token_ids_a is None: return len(token_ids_a + eos ) * [0] return len(token_ids_a + eos + token_ids_a + eos ) * [0] def _A (self ): return list( set(filter(lambda lowerCAmelCase : bool(re.search(r'<extra_id_\d+>' , lowerCAmelCase ) ) is not None , self.additional_special_tokens ) ) ) def _A (self ): return [self.convert_tokens_to_ids(lowerCAmelCase ) for token in self.get_sentinel_tokens()]
295
1
import argparse import os import pickle import sys import torch from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging logging.set_verbosity_info() # We do this to be able to load python 2 datasets pickles # See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918 lowerCAmelCase = data_utils.TransfoXLTokenizer lowerCAmelCase = data_utils.TransfoXLCorpus lowerCAmelCase = data_utils lowerCAmelCase = data_utils def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ , lowercase__ ) -> Tuple: '''simple docstring''' if transfo_xl_dataset_file: # Convert a pre-processed corpus (see original TensorFlow repo) with open(lowercase__ , 'rb' ) as fp: __lowercase= pickle.load(lowercase__ , encoding='latin1' ) # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term) __lowercase= pytorch_dump_folder_path + '/' + VOCAB_FILES_NAMES['pretrained_vocab_file'] print(F'Save vocabulary to {pytorch_vocab_dump_path}' ) __lowercase= corpus.vocab.__dict__ torch.save(lowercase__ , lowercase__ ) __lowercase= corpus.__dict__ corpus_dict_no_vocab.pop('vocab' , lowercase__ ) __lowercase= pytorch_dump_folder_path + '/' + CORPUS_NAME print(F'Save dataset to {pytorch_dataset_dump_path}' ) torch.save(lowercase__ , lowercase__ ) if tf_checkpoint_path: # Convert a pre-trained TensorFlow model __lowercase= os.path.abspath(lowercase__ ) __lowercase= os.path.abspath(lowercase__ ) print(F'Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.' ) # Initialise PyTorch model if transfo_xl_config_file == "": __lowercase= TransfoXLConfig() else: __lowercase= TransfoXLConfig.from_json_file(lowercase__ ) print(F'Building PyTorch model from configuration: {config}' ) __lowercase= TransfoXLLMHeadModel(lowercase__ ) __lowercase= load_tf_weights_in_transfo_xl(lowercase__ , lowercase__ , lowercase__ ) # Save pytorch-model __lowercase= os.path.join(lowercase__ , lowercase__ ) __lowercase= os.path.join(lowercase__ , lowercase__ ) print(F'Save PyTorch model to {os.path.abspath(lowercase__ )}' ) torch.save(model.state_dict() , lowercase__ ) print(F'Save configuration file to {os.path.abspath(lowercase__ )}' ) with open(lowercase__ , 'w' , encoding='utf-8' ) as f: f.write(config.to_json_string() ) if __name__ == "__main__": lowerCAmelCase = argparse.ArgumentParser() parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the folder to store the PyTorch model or dataset/vocab.''', ) parser.add_argument( '''--tf_checkpoint_path''', default='''''', type=str, help='''An optional path to a TensorFlow checkpoint path to be converted.''', ) parser.add_argument( '''--transfo_xl_config_file''', default='''''', type=str, help=( '''An optional config json file corresponding to the pre-trained BERT model. 
\n''' '''This specifies the model architecture.''' ), ) parser.add_argument( '''--transfo_xl_dataset_file''', default='''''', type=str, help='''An optional dataset file to be converted in a vocabulary.''', ) lowerCAmelCase = parser.parse_args() convert_transfo_xl_checkpoint_to_pytorch( args.tf_checkpoint_path, args.transfo_xl_config_file, args.pytorch_dump_folder_path, args.transfo_xl_dataset_file, )
295
from collections.abc import Sequence


def max_subarray_sum(arr: Sequence[float], allow_empty_subarrays: bool = False) -> float:
    """Kadane's algorithm: return the sum of a maximum-sum contiguous subarray."""
    if not arr:
        return 0
    max_sum = 0 if allow_empty_subarrays else float("-inf")
    curr_sum = 0.0
    for num in arr:
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)
    return max_sum


if __name__ == "__main__":
    from doctest import testmod

    testmod()

    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
    print(f"{max_subarray_sum(nums) = }")
295
1
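Sanity checks, assuming max_subarray_sum from above is in scope; the classic example attains 6 on the subarray [4, -1, 2, 1]:

assert max_subarray_sum([-2, 1, -3, 4, -1, 2, 1, -5, 4]) == 6
assert max_subarray_sum([-1, -2, -3]) == -1  # all-negative: best single element
assert max_subarray_sum([-1, -2, -3], allow_empty_subarrays=True) == 0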
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available


_import_structure = {
    "configuration_audio_spectrogram_transformer": [
        "AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "ASTConfig",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_audio_spectrogram_transformer"] = [
        "AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ASTForAudioClassification",
        "ASTModel",
        "ASTPreTrainedModel",
    ]

try:
    if not is_speech_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_audio_spectrogram_transformer"] = ["ASTFeatureExtractor"]


if TYPE_CHECKING:
    from .configuration_audio_spectrogram_transformer import (
        AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ASTConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_audio_spectrogram_transformer import (
            AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            ASTForAudioClassification,
            ASTModel,
            ASTPreTrainedModel,
        )

    try:
        if not is_speech_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
295
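A hedged sketch of what the _LazyModule indirection above buys: nothing heavy is imported at package-import time; each submodule is resolved only when one of its attributes is first touched (the model classes additionally require torch to be installed):

import transformers.models.audio_spectrogram_transformer as ast_module

config_cls = ast_module.ASTConfig  # resolves the config submodule on first access
model_cls = ast_module.ASTModel    # resolves the torch-backed modeling submodule
print(config_cls.__name__, model_cls.__name__)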
import gc import inspect import unittest import torch from parameterized import parameterized from diffusers import PriorTransformer from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin enable_full_determinism() class A ( A_ , unittest.TestCase ): UpperCamelCase_ : Any =PriorTransformer UpperCamelCase_ : List[str] ='''hidden_states''' @property def _A (self ): __lowercase= 4 __lowercase= 8 __lowercase= 7 __lowercase= floats_tensor((batch_size, embedding_dim) ).to(lowerCAmelCase ) __lowercase= floats_tensor((batch_size, embedding_dim) ).to(lowerCAmelCase ) __lowercase= floats_tensor((batch_size, num_embeddings, embedding_dim) ).to(lowerCAmelCase ) return { "hidden_states": hidden_states, "timestep": 2, "proj_embedding": proj_embedding, "encoder_hidden_states": encoder_hidden_states, } def _A (self , lowerCAmelCase=0 ): torch.manual_seed(lowerCAmelCase ) __lowercase= 4 __lowercase= 8 __lowercase= 7 __lowercase= torch.randn((batch_size, embedding_dim) ).to(lowerCAmelCase ) __lowercase= torch.randn((batch_size, embedding_dim) ).to(lowerCAmelCase ) __lowercase= torch.randn((batch_size, num_embeddings, embedding_dim) ).to(lowerCAmelCase ) return { "hidden_states": hidden_states, "timestep": 2, "proj_embedding": proj_embedding, "encoder_hidden_states": encoder_hidden_states, } @property def _A (self ): return (4, 8) @property def _A (self ): return (4, 8) def _A (self ): __lowercase= { 'num_attention_heads': 2, 'attention_head_dim': 4, 'num_layers': 2, 'embedding_dim': 8, 'num_embeddings': 7, 'additional_embeddings': 4, } __lowercase= self.dummy_input return init_dict, inputs_dict def _A (self ): __lowercase, __lowercase= PriorTransformer.from_pretrained( 'hf-internal-testing/prior-dummy' , output_loading_info=lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) self.assertEqual(len(loading_info['missing_keys'] ) , 0 ) model.to(lowerCAmelCase ) __lowercase= model(**self.dummy_input )[0] assert hidden_states is not None, "Make sure output is not None" def _A (self ): __lowercase, __lowercase= self.prepare_init_args_and_inputs_for_common() __lowercase= self.model_class(**lowerCAmelCase ) __lowercase= inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __lowercase= [*signature.parameters.keys()] __lowercase= ['hidden_states', 'timestep'] self.assertListEqual(arg_names[:2] , lowerCAmelCase ) def _A (self ): __lowercase= PriorTransformer.from_pretrained('hf-internal-testing/prior-dummy' ) __lowercase= model.to(lowerCAmelCase ) if hasattr(lowerCAmelCase , 'set_default_attn_processor' ): model.set_default_attn_processor() __lowercase= self.get_dummy_seed_input() with torch.no_grad(): __lowercase= model(**lowerCAmelCase )[0] __lowercase= output[0, :5].flatten().cpu() print(lowerCAmelCase ) # Since the VAE Gaussian prior's generator is seeded on the appropriate device, # the expected output slices are not the same for CPU and GPU. 
__lowercase= torch.tensor([-1.34_36, -0.28_70, 0.75_38, 0.43_68, -0.02_39] ) self.assertTrue(torch_all_close(lowerCAmelCase , lowerCAmelCase , rtol=1E-2 ) ) @slow class A ( unittest.TestCase ): def _A (self , lowerCAmelCase=1 , lowerCAmelCase=7_6_8 , lowerCAmelCase=7_7 , lowerCAmelCase=0 ): torch.manual_seed(lowerCAmelCase ) __lowercase= batch_size __lowercase= embedding_dim __lowercase= num_embeddings __lowercase= torch.randn((batch_size, embedding_dim) ).to(lowerCAmelCase ) __lowercase= torch.randn((batch_size, embedding_dim) ).to(lowerCAmelCase ) __lowercase= torch.randn((batch_size, num_embeddings, embedding_dim) ).to(lowerCAmelCase ) return { "hidden_states": hidden_states, "timestep": 2, "proj_embedding": proj_embedding, "encoder_hidden_states": encoder_hidden_states, } def _A (self ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @parameterized.expand( [ # fmt: off [1_3, [-0.58_61, 0.12_83, -0.09_31, 0.08_82, 0.44_76, 0.13_29, -0.04_98, 0.06_40]], [3_7, [-0.49_13, 0.01_10, -0.04_83, 0.05_41, 0.49_54, -0.01_70, 0.03_54, 0.16_51]], # fmt: on ] ) def _A (self , lowerCAmelCase , lowerCAmelCase ): __lowercase= PriorTransformer.from_pretrained('kandinsky-community/kandinsky-2-1-prior' , subfolder='prior' ) model.to(lowerCAmelCase ) __lowercase= self.get_dummy_seed_input(seed=lowerCAmelCase ) with torch.no_grad(): __lowercase= model(**lowerCAmelCase )[0] assert list(sample.shape ) == [1, 7_6_8] __lowercase= sample[0, :8].flatten().cpu() print(lowerCAmelCase ) __lowercase= torch.tensor(lowerCAmelCase ) assert torch_all_close(lowerCAmelCase , lowerCAmelCase , atol=1E-3 )
295
1
import inspect import unittest from transformers import ConvNextConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class A : def __init__(self , lowerCAmelCase , lowerCAmelCase=1_3 , lowerCAmelCase=3_2 , lowerCAmelCase=3 , lowerCAmelCase=4 , lowerCAmelCase=[1_0, 2_0, 3_0, 4_0] , lowerCAmelCase=[2, 2, 3, 2] , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=3_7 , lowerCAmelCase="gelu" , lowerCAmelCase=1_0 , lowerCAmelCase=0.02 , lowerCAmelCase=["stage2", "stage3", "stage4"] , lowerCAmelCase=[2, 3, 4] , lowerCAmelCase=None , ): __lowercase= parent __lowercase= batch_size __lowercase= image_size __lowercase= num_channels __lowercase= num_stages __lowercase= hidden_sizes __lowercase= depths __lowercase= is_training __lowercase= use_labels __lowercase= intermediate_size __lowercase= hidden_act __lowercase= num_labels __lowercase= initializer_range __lowercase= out_features __lowercase= out_indices __lowercase= scope def _A (self ): __lowercase= floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __lowercase= None if self.use_labels: __lowercase= ids_tensor([self.batch_size] , self.num_labels ) __lowercase= self.get_config() return config, pixel_values, labels def _A (self ): return ConvNextConfig( num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=lowerCAmelCase , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): __lowercase= ConvNextModel(config=lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= model(lowerCAmelCase ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): __lowercase= ConvNextForImageClassification(lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= model(lowerCAmelCase , labels=lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): __lowercase= ConvNextBackbone(config=lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= model(lowerCAmelCase ) # verify hidden states self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] ) # 
verify backbone works with out_features=None __lowercase= None __lowercase= ConvNextBackbone(config=lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= model(lowerCAmelCase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def _A (self ): __lowercase= self.prepare_config_and_inputs() __lowercase, __lowercase, __lowercase= config_and_inputs __lowercase= {'pixel_values': pixel_values} return config, inputs_dict @require_torch class A ( A_ , A_ , unittest.TestCase ): UpperCamelCase_ : str =( ( ConvNextModel, ConvNextForImageClassification, ConvNextBackbone, ) if is_torch_available() else () ) UpperCamelCase_ : Union[str, Any] =( {'''feature-extraction''': ConvNextModel, '''image-classification''': ConvNextForImageClassification} if is_torch_available() else {} ) UpperCamelCase_ : List[str] =True UpperCamelCase_ : Dict =False UpperCamelCase_ : Tuple =False UpperCamelCase_ : Any =False UpperCamelCase_ : Optional[Any] =False def _A (self ): __lowercase= ConvNextModelTester(self ) __lowercase= ConfigTester(self , config_class=lowerCAmelCase , has_text_modality=lowerCAmelCase , hidden_size=3_7 ) def _A (self ): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _A (self ): return @unittest.skip(reason='ConvNext does not use inputs_embeds' ) def _A (self ): pass @unittest.skip(reason='ConvNext does not support input and output embeddings' ) def _A (self ): pass @unittest.skip(reason='ConvNext does not use feedforward chunking' ) def _A (self ): pass def _A (self ): __lowercase, __lowercase= self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowercase= model_class(lowerCAmelCase ) __lowercase= inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __lowercase= [*signature.parameters.keys()] __lowercase= ['pixel_values'] self.assertListEqual(arg_names[:1] , lowerCAmelCase ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCAmelCase ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*lowerCAmelCase ) def _A (self ): def check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): __lowercase= model_class(lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() with torch.no_grad(): __lowercase= model(**self._prepare_for_class(lowerCAmelCase , lowerCAmelCase ) ) __lowercase= outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states __lowercase= self.model_tester.num_stages self.assertEqual(len(lowerCAmelCase ) , expected_num_stages + 1 ) # ConvNext's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) __lowercase, 
__lowercase= self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowercase= True check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __lowercase= True check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase ) @slow def _A (self ): for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowercase= ConvNextModel.from_pretrained(lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) def _lowerCamelCase( ) -> Optional[Any]: '''simple docstring''' __lowercase= Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class A ( unittest.TestCase ): @cached_property def _A (self ): return AutoImageProcessor.from_pretrained('facebook/convnext-tiny-224' ) if is_vision_available() else None @slow def _A (self ): __lowercase= ConvNextForImageClassification.from_pretrained('facebook/convnext-tiny-224' ).to(lowerCAmelCase ) __lowercase= self.default_image_processor __lowercase= prepare_img() __lowercase= image_processor(images=lowerCAmelCase , return_tensors='pt' ).to(lowerCAmelCase ) # forward pass with torch.no_grad(): __lowercase= model(**lowerCAmelCase ) # verify the logits __lowercase= torch.Size((1, 1_0_0_0) ) self.assertEqual(outputs.logits.shape , lowerCAmelCase ) __lowercase= torch.tensor([-0.02_60, -0.47_39, 0.19_11] ).to(lowerCAmelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase , atol=1E-4 ) ) @require_torch class A ( unittest.TestCase , A_ ): UpperCamelCase_ : Optional[Any] =(ConvNextBackbone,) if is_torch_available() else () UpperCamelCase_ : List[str] =ConvNextConfig UpperCamelCase_ : int =False def _A (self ): __lowercase= ConvNextModelTester(self )
295
def counting_sort(collection: list) -> list:
    """Stable counting sort for a collection of integers (negatives allowed)."""
    if collection == []:
        return []

    # get some information about the collection
    coll_len = len(collection)
    coll_max = max(collection)
    coll_min = min(collection)

    # create the counting array
    counting_arr_length = coll_max + 1 - coll_min
    counting_arr = [0] * counting_arr_length

    # count how many times a number appears in the collection
    for number in collection:
        counting_arr[number - coll_min] += 1

    # sum each position with its predecessors. now, counting_arr[i] tells
    # us how many elements <= i are in the collection
    for i in range(1, counting_arr_length):
        counting_arr[i] = counting_arr[i] + counting_arr[i - 1]

    # create the output collection
    ordered = [0] * coll_len

    # place the elements in the output, respecting the original order (stable
    # sort) from end to begin, updating counting_arr
    for i in reversed(range(0, coll_len)):
        ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i]
        counting_arr[collection[i] - coll_min] -= 1

    return ordered


def counting_sort_string(string: str) -> str:
    return "".join([chr(i) for i in counting_sort([ord(c) for c in string])])


if __name__ == "__main__":
    # Test string sort
    assert counting_sort_string("thisisthestring") == "eghhiiinrsssttt"

    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(counting_sort(unsorted))
295
1
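Example runs, assuming counting_sort and counting_sort_string from above are in scope; negative values work because indices are offset by coll_min:

assert counting_sort([0, 5, 3, 2, 2]) == [0, 2, 2, 3, 5]
assert counting_sort([-2, -5, -45]) == [-45, -5, -2]
assert counting_sort_string("thisisthestring") == "eghhiiinrsssttt"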
from math import isqrt


def is_prime(number: int) -> bool:
    """Trial division up to the integer square root."""
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))


def solution(max_prime: int = 10**6) -> int:
    """Count primes below max_prime that are differences of consecutive cubes."""
    primes_count = 0
    cube_index = 1
    prime_candidate = 7
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)
        cube_index += 1
        prime_candidate += 6 * cube_index
    return primes_count


if __name__ == "__main__":
    print(f"{solution() = }")
295
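Why stepping by 6 * cube_index enumerates the right candidates (an explanatory check, not part of the original file): the candidates 7, 19, 37, ... are exactly the differences of consecutive cubes, and successive differences grow by 6k.

prev = None
for k in range(1, 6):
    diff = (k + 1) ** 3 - k**3
    assert diff == 3 * k * k + 3 * k + 1  # difference of consecutive cubes
    if prev is not None:
        assert diff - prev == 6 * k  # the gap the loop adds each iteration
    prev = diff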
import os import tempfile import unittest from transformers import DistilBertConfig, is_torch_available from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, DistilBertModel, ) class A ( A_ ): def __init__(self , lowerCAmelCase , lowerCAmelCase=1_3 , lowerCAmelCase=7 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=False , lowerCAmelCase=True , lowerCAmelCase=9_9 , lowerCAmelCase=3_2 , lowerCAmelCase=5 , lowerCAmelCase=4 , lowerCAmelCase=3_7 , lowerCAmelCase="gelu" , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=5_1_2 , lowerCAmelCase=1_6 , lowerCAmelCase=2 , lowerCAmelCase=0.02 , lowerCAmelCase=3 , lowerCAmelCase=4 , lowerCAmelCase=None , ): __lowercase= parent __lowercase= batch_size __lowercase= seq_length __lowercase= is_training __lowercase= use_input_mask __lowercase= use_token_type_ids __lowercase= use_labels __lowercase= vocab_size __lowercase= hidden_size __lowercase= num_hidden_layers __lowercase= num_attention_heads __lowercase= intermediate_size __lowercase= hidden_act __lowercase= hidden_dropout_prob __lowercase= attention_probs_dropout_prob __lowercase= max_position_embeddings __lowercase= type_vocab_size __lowercase= type_sequence_label_size __lowercase= initializer_range __lowercase= num_labels __lowercase= num_choices __lowercase= scope def _A (self ): __lowercase= ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowercase= None if self.use_input_mask: __lowercase= random_attention_mask([self.batch_size, self.seq_length] ) __lowercase= None __lowercase= None __lowercase= None if self.use_labels: __lowercase= ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowercase= ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __lowercase= ids_tensor([self.batch_size] , self.num_choices ) __lowercase= self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def _A (self ): return DistilBertConfig( vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): __lowercase= DistilBertModel(config=lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= model(lowerCAmelCase , lowerCAmelCase ) __lowercase= model(lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): __lowercase= DistilBertForMaskedLM(config=lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= model(lowerCAmelCase , attention_mask=lowerCAmelCase , labels=lowerCAmelCase ) 
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): __lowercase= DistilBertForQuestionAnswering(config=lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= model( lowerCAmelCase , attention_mask=lowerCAmelCase , start_positions=lowerCAmelCase , end_positions=lowerCAmelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): __lowercase= self.num_labels __lowercase= DistilBertForSequenceClassification(lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= model(lowerCAmelCase , attention_mask=lowerCAmelCase , labels=lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): __lowercase= self.num_labels __lowercase= DistilBertForTokenClassification(config=lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= model(lowerCAmelCase , attention_mask=lowerCAmelCase , labels=lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): __lowercase= self.num_choices __lowercase= DistilBertForMultipleChoice(config=lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __lowercase= input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __lowercase= model( lowerCAmelCase , attention_mask=lowerCAmelCase , labels=lowerCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def _A (self ): __lowercase= self.prepare_config_and_inputs() ((__lowercase), (__lowercase), (__lowercase), (__lowercase), (__lowercase), (__lowercase))= config_and_inputs __lowercase= {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class A ( A_ , A_ , unittest.TestCase ): UpperCamelCase_ : Any =( ( DistilBertModel, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, ) if is_torch_available() else None ) UpperCamelCase_ : Optional[int] =( { '''feature-extraction''': DistilBertModel, '''fill-mask''': DistilBertForMaskedLM, '''question-answering''': DistilBertForQuestionAnswering, '''text-classification''': DistilBertForSequenceClassification, '''token-classification''': DistilBertForTokenClassification, '''zero-shot''': DistilBertForSequenceClassification, } if is_torch_available() else {} ) UpperCamelCase_ : str =True UpperCamelCase_ : str =True UpperCamelCase_ : Union[str, Any] =True UpperCamelCase_ : Optional[int] =True def _A (self ): __lowercase= DistilBertModelTester(self ) __lowercase= ConfigTester(self , config_class=lowerCAmelCase , dim=3_7 ) def _A (self ): self.config_tester.run_common_tests() def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_model(*lowerCAmelCase ) def _A (self ): 
__lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_masked_lm(*lowerCAmelCase ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_question_answering(*lowerCAmelCase ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_sequence_classification(*lowerCAmelCase ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_token_classification(*lowerCAmelCase ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_multiple_choice(*lowerCAmelCase ) @slow def _A (self ): for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowercase= DistilBertModel.from_pretrained(lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) @slow @require_torch_gpu def _A (self ): __lowercase, __lowercase= self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # BertForMultipleChoice behaves incorrectly in JIT environments. if model_class == DistilBertForMultipleChoice: return __lowercase= True __lowercase= model_class(config=lowerCAmelCase ) __lowercase= self._prepare_for_class(lowerCAmelCase , lowerCAmelCase ) __lowercase= torch.jit.trace( lowerCAmelCase , (inputs_dict['input_ids'].to('cpu' ), inputs_dict['attention_mask'].to('cpu' )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(lowerCAmelCase , os.path.join(lowerCAmelCase , 'traced_model.pt' ) ) __lowercase= torch.jit.load(os.path.join(lowerCAmelCase , 'traced_model.pt' ) , map_location=lowerCAmelCase ) loaded(inputs_dict['input_ids'].to(lowerCAmelCase ) , inputs_dict['attention_mask'].to(lowerCAmelCase ) ) @require_torch class A ( unittest.TestCase ): @slow def _A (self ): __lowercase= DistilBertModel.from_pretrained('distilbert-base-uncased' ) __lowercase= torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] ) __lowercase= torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): __lowercase= model(lowerCAmelCase , attention_mask=lowerCAmelCase )[0] __lowercase= torch.Size((1, 1_1, 7_6_8) ) self.assertEqual(output.shape , lowerCAmelCase ) __lowercase= torch.tensor( [[[-0.16_39, 0.32_99, 0.16_48], [-0.17_46, 0.32_89, 0.17_10], [-0.18_84, 0.33_57, 0.18_10]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , lowerCAmelCase , atol=1E-4 ) )
295
1
import unittest from transformers import is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow if is_torch_available(): import torch from transformers import XLMRobertaModel @require_sentencepiece @require_tokenizers @require_torch class A ( unittest.TestCase ): @slow def _A (self ): __lowercase= XLMRobertaModel.from_pretrained('xlm-roberta-base' ) __lowercase= torch.tensor([[0, 5_8_1, 1_0_2_6_9, 8_3, 9_9_9_4_2, 1_3_6, 6_0_7_4_2, 2_3, 7_0, 8_0_5_8_3, 1_8_2_7_6, 2]] ) # The dog is cute and lives in the garden house __lowercase= torch.Size((1, 1_2, 7_6_8) ) # batch_size, sequence_length, embedding_vector_dim __lowercase= torch.tensor( [[-0.01_01, 0.12_18, -0.08_03, 0.08_01, 0.13_27, 0.07_76, -0.12_15, 0.23_83, 0.33_38, 0.31_06, 0.03_00, 0.02_52]] ) # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.eval() # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1] with torch.no_grad(): __lowercase= model(lowerCAmelCase )['last_hidden_state'].detach() self.assertEqual(output.shape , lowerCAmelCase ) # compare the actual values for a slice of last dim self.assertTrue(torch.allclose(output[:, :, -1] , lowerCAmelCase , atol=1E-3 ) ) @slow def _A (self ): __lowercase= XLMRobertaModel.from_pretrained('xlm-roberta-large' ) __lowercase= torch.tensor([[0, 5_8_1, 1_0_2_6_9, 8_3, 9_9_9_4_2, 1_3_6, 6_0_7_4_2, 2_3, 7_0, 8_0_5_8_3, 1_8_2_7_6, 2]] ) # The dog is cute and lives in the garden house __lowercase= torch.Size((1, 1_2, 1_0_2_4) ) # batch_size, sequence_length, embedding_vector_dim __lowercase= torch.tensor( [[-0.06_99, -0.03_18, 0.07_05, -0.12_41, 0.09_99, -0.05_20, 0.10_04, -0.18_38, -0.47_04, 0.14_37, 0.08_21, 0.01_26]] ) # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large') # xlmr.eval() # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1] with torch.no_grad(): __lowercase= model(lowerCAmelCase )['last_hidden_state'].detach() self.assertEqual(output.shape , lowerCAmelCase ) # compare the actual values for a slice of last dim self.assertTrue(torch.allclose(output[:, :, -1] , lowerCAmelCase , atol=1E-3 ) )
295
def bfs(graph, source, t, parent):
    """Breadth-first search for an augmenting path; fills parent[] along the way."""
    visited = [False] * len(graph)
    queue = []
    queue.append(source)
    visited[source] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]


def ford_fulkerson(graph, source, sink):
    """Max flow via BFS augmenting paths (Edmonds-Karp style)."""
    parent = [-1] * len(graph)
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum residual capacity along the selected path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow

        # Update residual capacities of the edges and reverse edges on the path
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow


graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]
source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
295
1
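The hard-coded network above is the classic CLRS max-flow example, whose maximum flow is 23. A quick check, assuming ford_fulkerson is in scope (note that it consumes the capacities in place, so pass a fresh matrix):

fresh_graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]
assert ford_fulkerson(fresh_graph, 0, 5) == 23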
def split(string: str, separator: str = " ") -> list:
    """Split a string on a one-character separator; a trailing separator is dropped."""
    split_words = []
    last_index = 0
    for index, char in enumerate(string):
        if char == separator:
            split_words.append(string[last_index:index])
            last_index = index + 1
        elif index + 1 == len(string):
            split_words.append(string[last_index : index + 1])
    return split_words


if __name__ == "__main__":
    from doctest import testmod

    testmod()
295
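How this differs from str.split, assuming the split function above is in scope: a trailing separator does not produce a final empty field.

assert split("apple#banana#cherry#orange", separator="#") == ["apple", "banana", "cherry", "orange"]
assert split("Hello there") == ["Hello", "there"]
assert split("ab#", separator="#") == ["ab"]  # "ab#".split("#") would give ["ab", ""]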
from __future__ import annotations


def kmp(pattern: str, text: str) -> bool:
    """Knuth-Morris-Pratt: return True if pattern occurs in text."""
    # 1) Construct the failure array
    failure = get_failure_array(pattern)

    # 2) Step through text searching for pattern
    i, j = 0, 0  # index into text, pattern
    while i < len(text):
        if pattern[j] == text[i]:
            if j == (len(pattern) - 1):
                return True
            j += 1

        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False


def get_failure_array(pattern: str) -> list[int]:
    """For each prefix, the length of the longest proper prefix that is also a suffix."""
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i)
    return failure


if __name__ == "__main__":
    # Test 1)
    pattern = "abc1abc12"
    texta = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    textb = "alskfjaldsk23adsfabcabc"
    assert kmp(pattern, texta) and not kmp(pattern, textb)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert kmp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert kmp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert kmp(pattern, text)

    # Test 5)
    pattern = "aabaabaaa"
    assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
295
1
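A small demonstration, assuming kmp and get_failure_array from above are in scope; the failure value at each position is the length of the longest proper prefix that is also a suffix there:

assert get_failure_array("aabaabaaa") == [0, 1, 0, 1, 2, 3, 4, 5, 2]
assert kmp("aba", "cabad")       # "aba" occurs at index 1
assert not kmp("abc", "ababab")  # never completes a full match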
def triangle_number_generator():
    """Yield the triangle numbers 1, 3, 6, 10, ..."""
    for n in range(1, 1_000_000):
        yield n * (n + 1) // 2


def count_divisors(n: int) -> int:
    """Count the divisors of n via its prime factorization."""
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors_count *= multiplicity + 1
        i += 1
    if n > 1:
        divisors_count *= 2
    return divisors_count


def solution() -> int:
    """Project Euler 12: the first triangle number with more than 500 divisors."""
    return next(i for i in triangle_number_generator() if count_divisors(i) > 500)


if __name__ == "__main__":
    print(solution())
295
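A cheaper sanity check at a lower threshold, assuming the functions above are in scope: the first triangle number with more than five divisors is 28 (divisors 1, 2, 4, 7, 14, 28).

assert next(t for t in triangle_number_generator() if count_divisors(t) > 5) == 28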
from typing import TYPE_CHECKING

from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable


_import_structure = {"configuration_gpt_neox": ["GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXConfig"]}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_gpt_neox_fast"] = ["GPTNeoXTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_gpt_neox"] = [
        "GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoXForCausalLM",
        "GPTNeoXForQuestionAnswering",
        "GPTNeoXForSequenceClassification",
        "GPTNeoXForTokenClassification",
        "GPTNeoXLayer",
        "GPTNeoXModel",
        "GPTNeoXPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_neox import (
            GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTNeoXForCausalLM,
            GPTNeoXForQuestionAnswering,
            GPTNeoXForSequenceClassification,
            GPTNeoXForTokenClassification,
            GPTNeoXLayer,
            GPTNeoXModel,
            GPTNeoXPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
295
1
import argparse import os import re import packaging.version lowerCAmelCase = '''examples/''' lowerCAmelCase = { '''examples''': (re.compile(R'''^check_min_version\("[^"]+"\)\s*$''', re.MULTILINE), '''check_min_version("VERSION")\n'''), '''init''': (re.compile(R'''^__version__\s+=\s+"([^"]+)"\s*$''', re.MULTILINE), '''__version__ = "VERSION"\n'''), '''setup''': (re.compile(R'''^(\s*)version\s*=\s*"[^"]+",''', re.MULTILINE), R'''\1version="VERSION",'''), '''doc''': (re.compile(R'''^(\s*)release\s*=\s*"[^"]+"$''', re.MULTILINE), '''release = "VERSION"\n'''), } lowerCAmelCase = { '''init''': '''src/transformers/__init__.py''', '''setup''': '''setup.py''', } lowerCAmelCase = '''README.md''' def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ ) -> Optional[int]: '''simple docstring''' with open(lowercase__ , 'r' , encoding='utf-8' , newline='\n' ) as f: __lowercase= f.read() __lowercase, __lowercase= REPLACE_PATTERNS[pattern] __lowercase= replace.replace('VERSION' , lowercase__ ) __lowercase= re_pattern.sub(lowercase__ , lowercase__ ) with open(lowercase__ , 'w' , encoding='utf-8' , newline='\n' ) as f: f.write(lowercase__ ) def _lowerCamelCase( lowercase__ ) -> Optional[int]: '''simple docstring''' for folder, directories, fnames in os.walk(lowercase__ ): # Removing some of the folders with non-actively maintained examples from the walk if "research_projects" in directories: directories.remove('research_projects' ) if "legacy" in directories: directories.remove('legacy' ) for fname in fnames: if fname.endswith('.py' ): update_version_in_file(os.path.join(lowercase__ , lowercase__ ) , lowercase__ , pattern='examples' ) def _lowerCamelCase( lowercase__ , lowercase__=False ) -> int: '''simple docstring''' for pattern, fname in REPLACE_FILES.items(): update_version_in_file(lowercase__ , lowercase__ , lowercase__ ) if not patch: update_version_in_examples(lowercase__ ) def _lowerCamelCase( ) -> Dict: '''simple docstring''' __lowercase= '🤗 Transformers currently provides the following architectures' __lowercase= '1. Want to contribute a new model?' with open(lowercase__ , 'r' , encoding='utf-8' , newline='\n' ) as f: __lowercase= f.readlines() # Find the start of the list. __lowercase= 0 while not lines[start_index].startswith(_start_prompt ): start_index += 1 start_index += 1 __lowercase= start_index # Update the lines in the model list. while not lines[index].startswith(_end_prompt ): if lines[index].startswith('1.' ): __lowercase= lines[index].replace( 'https://huggingface.co/docs/transformers/main/model_doc' , 'https://huggingface.co/docs/transformers/model_doc' , ) index += 1 with open(lowercase__ , 'w' , encoding='utf-8' , newline='\n' ) as f: f.writelines(lowercase__ ) def _lowerCamelCase( ) -> List[str]: '''simple docstring''' with open(REPLACE_FILES['init'] , 'r' ) as f: __lowercase= f.read() __lowercase= REPLACE_PATTERNS['init'][0].search(lowercase__ ).groups()[0] return packaging.version.parse(lowercase__ ) def _lowerCamelCase( lowercase__=False ) -> List[str]: '''simple docstring''' __lowercase= get_version() if patch and default_version.is_devrelease: raise ValueError('Can\'t create a patch version from the dev branch, checkout a released version!' ) if default_version.is_devrelease: __lowercase= default_version.base_version elif patch: __lowercase= F'{default_version.major}.{default_version.minor}.{default_version.micro + 1}' else: __lowercase= F'{default_version.major}.{default_version.minor + 1}.0' # Now let's ask nicely if that's the right one. 
__lowercase= input(F'Which version are you releasing? [{default_version}]' ) if len(lowercase__ ) == 0: __lowercase= default_version print(F'Updating version to {version}.' ) global_version_update(lowercase__ , patch=lowercase__ ) if not patch: print('Cleaning main README, don\'t forget to run `make fix-copies`.' ) clean_main_ref_in_model_list() def _lowerCamelCase( ) -> Union[str, Any]: '''simple docstring''' __lowercase= get_version() __lowercase= F'{current_version.major}.{current_version.minor + 1}.0.dev0' __lowercase= current_version.base_version # Check with the user we got that right. __lowercase= input(F'Which version are we developing now? [{dev_version}]' ) if len(lowercase__ ) == 0: __lowercase= dev_version print(F'Updating version to {version}.' ) global_version_update(lowercase__ ) print('Cleaning main README, don\'t forget to run `make fix-copies`.' ) clean_main_ref_in_model_list() if __name__ == "__main__": lowerCAmelCase = argparse.ArgumentParser() parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''') parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''') lowerCAmelCase = parser.parse_args() if not args.post_release: pre_release_work(patch=args.patch) elif args.patch: print('''Nothing to do after a patch :-)''') else: post_release_work()
295
import enum import warnings from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING from ..utils import add_end_docstrings, is_tf_available from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf class A ( enum.Enum ): UpperCamelCase_ : Optional[int] =0 UpperCamelCase_ : Tuple =1 UpperCamelCase_ : Optional[int] =2 @add_end_docstrings(A_ ) class A ( A_ ): UpperCamelCase_ : Union[str, Any] =''' In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision and denounces one of the men as a horse thief. Although his father initially slaps him for making such an accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop, begging for his blessing. <eod> </s> <eos> ''' def __init__(self , *lowerCAmelCase , **lowerCAmelCase ): super().__init__(*lowerCAmelCase , **lowerCAmelCase ) self.check_model_type( TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == 'tf' else MODEL_FOR_CAUSAL_LM_MAPPING ) if "prefix" not in self._preprocess_params: # This is very specific. The logic is quite complex and needs to be done # as a "default". # It also defines both some preprocess_kwargs and generate_kwargs # which is why we cannot put them in their respective methods. __lowercase= None if self.model.config.prefix is not None: __lowercase= self.model.config.prefix if prefix is None and self.model.__class__.__name__ in [ "XLNetLMHeadModel", "TransfoXLLMHeadModel", "TFXLNetLMHeadModel", "TFTransfoXLLMHeadModel", ]: # For XLNet and TransformerXL we add an article to the prompt to give more state to the model. __lowercase= self.XL_PREFIX if prefix is not None: # Recalculate some generate_kwargs linked to prefix. 
__lowercase, __lowercase, __lowercase= self._sanitize_parameters(prefix=lowerCAmelCase , **self._forward_params ) __lowercase= {**self._preprocess_params, **preprocess_params} __lowercase= {**self._forward_params, **forward_params} def _A (self , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , **lowerCAmelCase , ): __lowercase= {} if prefix is not None: __lowercase= prefix if prefix: __lowercase= self.tokenizer( lowerCAmelCase , padding=lowerCAmelCase , add_special_tokens=lowerCAmelCase , return_tensors=self.framework ) __lowercase= prefix_inputs['input_ids'].shape[-1] if handle_long_generation is not None: if handle_long_generation not in {"hole"}: raise ValueError( f'{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected' ' [None, \'hole\']' ) __lowercase= handle_long_generation preprocess_params.update(lowerCAmelCase ) __lowercase= generate_kwargs __lowercase= {} if return_full_text is not None and return_type is None: if return_text is not None: raise ValueError('`return_text` is mutually exclusive with `return_full_text`' ) if return_tensors is not None: raise ValueError('`return_full_text` is mutually exclusive with `return_tensors`' ) __lowercase= ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT if return_tensors is not None and return_type is None: if return_text is not None: raise ValueError('`return_text` is mutually exclusive with `return_tensors`' ) __lowercase= ReturnType.TENSORS if return_type is not None: __lowercase= return_type if clean_up_tokenization_spaces is not None: __lowercase= clean_up_tokenization_spaces if stop_sequence is not None: __lowercase= self.tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase ) if len(lowerCAmelCase ) > 1: warnings.warn( 'Stopping on a multiple token sequence is not yet supported on transformers. The first token of' ' the stop sequence will be used as the stop sequence string in the interim.' 
) __lowercase= stop_sequence_ids[0] return preprocess_params, forward_params, postprocess_params def _A (self , *lowerCAmelCase , **lowerCAmelCase ): # Parse arguments if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]: kwargs.update({'add_space_before_punct_symbol': True} ) return super()._parse_and_tokenize(*lowerCAmelCase , **lowerCAmelCase ) def __call__(self , lowerCAmelCase , **lowerCAmelCase ): return super().__call__(lowerCAmelCase , **lowerCAmelCase ) def _A (self , lowerCAmelCase , lowerCAmelCase="" , lowerCAmelCase=None , **lowerCAmelCase ): __lowercase= self.tokenizer( prefix + prompt_text , padding=lowerCAmelCase , add_special_tokens=lowerCAmelCase , return_tensors=self.framework ) __lowercase= prompt_text if handle_long_generation == "hole": __lowercase= inputs['input_ids'].shape[-1] if "max_new_tokens" in generate_kwargs: __lowercase= generate_kwargs['max_new_tokens'] else: __lowercase= generate_kwargs.get('max_length' , self.model.config.max_length ) - cur_len if new_tokens < 0: raise ValueError('We cannot infer how many new tokens are expected' ) if cur_len + new_tokens > self.tokenizer.model_max_length: __lowercase= self.tokenizer.model_max_length - new_tokens if keep_length <= 0: raise ValueError( 'We cannot use `hole` to handle this generation the number of desired tokens exceeds the' ' models max length' ) __lowercase= inputs['input_ids'][:, -keep_length:] if "attention_mask" in inputs: __lowercase= inputs['attention_mask'][:, -keep_length:] return inputs def _A (self , lowerCAmelCase , **lowerCAmelCase ): __lowercase= model_inputs['input_ids'] __lowercase= model_inputs.get('attention_mask' , lowerCAmelCase ) # Allow empty prompts if input_ids.shape[1] == 0: __lowercase= None __lowercase= None __lowercase= 1 else: __lowercase= input_ids.shape[0] __lowercase= model_inputs.pop('prompt_text' ) # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline. 
__lowercase= generate_kwargs.pop('prefix_length' , 0 ) if prefix_length > 0: __lowercase= 'max_new_tokens' in generate_kwargs or ( 'generation_config' in generate_kwargs and generate_kwargs['generation_config'].max_new_tokens is not None ) if not has_max_new_tokens: __lowercase= generate_kwargs.get('max_length' ) or self.model.config.max_length generate_kwargs["max_length"] += prefix_length __lowercase= 'min_new_tokens' in generate_kwargs or ( 'generation_config' in generate_kwargs and generate_kwargs['generation_config'].min_new_tokens is not None ) if not has_min_new_tokens and "min_length" in generate_kwargs: generate_kwargs["min_length"] += prefix_length # BS x SL __lowercase= self.model.generate(input_ids=lowerCAmelCase , attention_mask=lowerCAmelCase , **lowerCAmelCase ) __lowercase= generated_sequence.shape[0] if self.framework == "pt": __lowercase= generated_sequence.reshape(lowerCAmelCase , out_b // in_b , *generated_sequence.shape[1:] ) elif self.framework == "tf": __lowercase= tf.reshape(lowerCAmelCase , (in_b, out_b // in_b, *generated_sequence.shape[1:]) ) return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text} def _A (self , lowerCAmelCase , lowerCAmelCase=ReturnType.FULL_TEXT , lowerCAmelCase=True ): __lowercase= model_outputs['generated_sequence'][0] __lowercase= model_outputs['input_ids'] __lowercase= model_outputs['prompt_text'] __lowercase= generated_sequence.numpy().tolist() __lowercase= [] for sequence in generated_sequence: if return_type == ReturnType.TENSORS: __lowercase= {'generated_token_ids': sequence} elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}: # Decode text __lowercase= self.tokenizer.decode( lowerCAmelCase , skip_special_tokens=lowerCAmelCase , clean_up_tokenization_spaces=lowerCAmelCase , ) # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used if input_ids is None: __lowercase= 0 else: __lowercase= len( self.tokenizer.decode( input_ids[0] , skip_special_tokens=lowerCAmelCase , clean_up_tokenization_spaces=lowerCAmelCase , ) ) if return_type == ReturnType.FULL_TEXT: __lowercase= prompt_text + text[prompt_length:] else: __lowercase= text[prompt_length:] __lowercase= {'generated_text': all_text} records.append(lowerCAmelCase ) return records
import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST, OpenAIGPTConfig, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification, OpenAIGPTLMHeadModel, OpenAIGPTModel, ) class A : def __init__(self , lowerCAmelCase , lowerCAmelCase=1_3 , lowerCAmelCase=7 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=9_9 , lowerCAmelCase=3_2 , lowerCAmelCase=5 , lowerCAmelCase=4 , lowerCAmelCase=3_7 , lowerCAmelCase="gelu" , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=5_1_2 , lowerCAmelCase=1_6 , lowerCAmelCase=2 , lowerCAmelCase=0.02 , lowerCAmelCase=3 , lowerCAmelCase=4 , lowerCAmelCase=None , ): __lowercase= parent __lowercase= batch_size __lowercase= seq_length __lowercase= is_training __lowercase= use_token_type_ids __lowercase= use_labels __lowercase= vocab_size __lowercase= hidden_size __lowercase= num_hidden_layers __lowercase= num_attention_heads __lowercase= intermediate_size __lowercase= hidden_act __lowercase= hidden_dropout_prob __lowercase= attention_probs_dropout_prob __lowercase= max_position_embeddings __lowercase= type_vocab_size __lowercase= type_sequence_label_size __lowercase= initializer_range __lowercase= num_labels __lowercase= num_choices __lowercase= scope __lowercase= self.vocab_size - 1 def _A (self ): __lowercase= ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowercase= None if self.use_token_type_ids: __lowercase= ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __lowercase= None __lowercase= None __lowercase= None if self.use_labels: __lowercase= ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowercase= ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __lowercase= ids_tensor([self.batch_size] , self.num_choices ) __lowercase= OpenAIGPTConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , ) __lowercase= ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 ) return ( config, input_ids, head_mask, token_type_ids, sequence_labels, token_labels, choice_labels, ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , *lowerCAmelCase ): __lowercase= OpenAIGPTModel(config=lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= model(lowerCAmelCase , token_type_ids=lowerCAmelCase , head_mask=lowerCAmelCase ) __lowercase= model(lowerCAmelCase , token_type_ids=lowerCAmelCase ) __lowercase= model(lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , *lowerCAmelCase ): __lowercase= OpenAIGPTLMHeadModel(lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= model(lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) 
) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , *lowerCAmelCase ): __lowercase= OpenAIGPTDoubleHeadsModel(lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= model(lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , *lowerCAmelCase ): __lowercase= self.num_labels __lowercase= OpenAIGPTForSequenceClassification(lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowercase= model(lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _A (self ): __lowercase= self.prepare_config_and_inputs() ( ( __lowercase ), ( __lowercase ), ( __lowercase ), ( __lowercase ), ( __lowercase ), ( __lowercase ), ( __lowercase ), )= config_and_inputs __lowercase= { 'input_ids': input_ids, 'token_type_ids': token_type_ids, 'head_mask': head_mask, } return config, inputs_dict @require_torch class A ( A_ , A_ , A_ , unittest.TestCase ): UpperCamelCase_ : Optional[Any] =( (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification) if is_torch_available() else () ) UpperCamelCase_ : Tuple =( (OpenAIGPTLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly UpperCamelCase_ : List[str] =( { '''feature-extraction''': OpenAIGPTModel, '''text-classification''': OpenAIGPTForSequenceClassification, '''text-generation''': OpenAIGPTLMHeadModel, '''zero-shot''': OpenAIGPTForSequenceClassification, } if is_torch_available() else {} ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests": # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers. # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a # tiny config could not be created. 
return True return False def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=False ): __lowercase= super()._prepare_for_class(lowerCAmelCase , lowerCAmelCase , return_labels=lowerCAmelCase ) if return_labels: if model_class.__name__ == "OpenAIGPTDoubleHeadsModel": __lowercase= torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=lowerCAmelCase , ) __lowercase= inputs_dict['labels'] __lowercase= inputs_dict['labels'] __lowercase= torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=lowerCAmelCase , ) __lowercase= torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase ) return inputs_dict def _A (self ): __lowercase= OpenAIGPTModelTester(self ) __lowercase= ConfigTester(self , config_class=lowerCAmelCase , n_embd=3_7 ) def _A (self ): self.config_tester.run_common_tests() def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_model(*lowerCAmelCase ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*lowerCAmelCase ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_double_lm_head_model(*lowerCAmelCase ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*lowerCAmelCase ) @slow def _A (self ): for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowercase= OpenAIGPTModel.from_pretrained(lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) @require_torch class A ( unittest.TestCase ): @slow def _A (self ): __lowercase= OpenAIGPTLMHeadModel.from_pretrained('openai-gpt' ) model.to(lowerCAmelCase ) __lowercase= torch.tensor([[4_8_1, 4_7_3_5, 5_4_4]] , dtype=torch.long , device=lowerCAmelCase ) # the president is __lowercase= [ 4_8_1, 4_7_3_5, 5_4_4, 2_4_6, 9_6_3, 8_7_0, 7_6_2, 2_3_9, 2_4_4, 4_0_4_7_7, 2_4_4, 2_4_9, 7_1_9, 8_8_1, 4_8_7, 5_4_4, 2_4_0, 2_4_4, 6_0_3, 4_8_1, ] # the president is a very good man. " \n " i\'m sure he is, " said the __lowercase= model.generate(lowerCAmelCase , do_sample=lowerCAmelCase ) self.assertListEqual(output_ids[0].tolist() , lowerCAmelCase )
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch
import torch.nn as nn

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer


@dataclass
class A ( A_ ):
    UpperCamelCase_ : torch.FloatTensor


class A ( A_ , A_ ):
    @register_to_config
    def __init__(self , lowerCAmelCase = 3 , lowerCAmelCase = 3 , lowerCAmelCase = ("DownEncoderBlock2D",) , lowerCAmelCase = ("UpDecoderBlock2D",) , lowerCAmelCase = (6_4,) , lowerCAmelCase = 1 , lowerCAmelCase = "silu" , lowerCAmelCase = 3 , lowerCAmelCase = 3_2 , lowerCAmelCase = 2_5_6 , lowerCAmelCase = 3_2 , lowerCAmelCase = None , lowerCAmelCase = 0.1_82_15 , lowerCAmelCase = "group" , ):
        super().__init__()

        # pass init params to Encoder
        __lowercase= Encoder(
            in_channels=lowerCAmelCase , out_channels=lowerCAmelCase , down_block_types=lowerCAmelCase , block_out_channels=lowerCAmelCase , layers_per_block=lowerCAmelCase , act_fn=lowerCAmelCase , norm_num_groups=lowerCAmelCase , double_z=lowerCAmelCase , )

        __lowercase= vq_embed_dim if vq_embed_dim is not None else latent_channels

        __lowercase= nn.Conv2d(lowerCAmelCase , lowerCAmelCase , 1 )
        __lowercase= VectorQuantizer(lowerCAmelCase , lowerCAmelCase , beta=0.25 , remap=lowerCAmelCase , sane_index_shape=lowerCAmelCase )
        __lowercase= nn.Conv2d(lowerCAmelCase , lowerCAmelCase , 1 )

        # pass init params to Decoder
        __lowercase= Decoder(
            in_channels=lowerCAmelCase , out_channels=lowerCAmelCase , up_block_types=lowerCAmelCase , block_out_channels=lowerCAmelCase , layers_per_block=lowerCAmelCase , act_fn=lowerCAmelCase , norm_num_groups=lowerCAmelCase , norm_type=lowerCAmelCase , )

    @apply_forward_hook
    def _A (self , lowerCAmelCase , lowerCAmelCase = True ):
        __lowercase= self.encoder(lowerCAmelCase )
        __lowercase= self.quant_conv(lowerCAmelCase )

        if not return_dict:
            return (h,)

        return VQEncoderOutput(latents=lowerCAmelCase )

    @apply_forward_hook
    def _A (self , lowerCAmelCase , lowerCAmelCase = False , lowerCAmelCase = True ):
        # also go through quantization layer
        if not force_not_quantize:
            __lowercase, __lowercase, __lowercase= self.quantize(lowerCAmelCase )
        else:
            __lowercase= h
        __lowercase= self.post_quant_conv(lowerCAmelCase )
        __lowercase= self.decoder(lowerCAmelCase , quant if self.config.norm_type == 'spatial' else None )

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=lowerCAmelCase )

    def _A (self , lowerCAmelCase , lowerCAmelCase = True ):
        __lowercase= sample
        __lowercase= self.encode(lowerCAmelCase ).latents
        __lowercase= self.decode(lowerCAmelCase ).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=lowerCAmelCase )
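# ---------------------------------------------------------------------------
# Hedged usage sketch for the VQ-VAE module above, assuming the public
# `diffusers.VQModel` class that this file implements. The tiny config below
# is illustrative, not a released checkpoint.
import torch
from diffusers import VQModel

model = VQModel(block_out_channels=(32,), num_vq_embeddings=64, sample_size=32)
images = torch.randn(1, 3, 32, 32)
latents = model.encode(images).latents          # quant_conv(encoder(x))
reconstruction = model.decode(latents).sample   # decoder(post_quant_conv(quantize(h)))
print(reconstruction.shape)                      # torch.Size([1, 3, 32, 32])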
from math import atan, cos, radians, sin, tan

from .haversine_distance import haversine_distance


lowerCAmelCase = 6_3_7_8_1_3_7.0
lowerCAmelCase = 6_3_5_6_7_5_2.3_1_4_2_4_5
lowerCAmelCase = 6_3_7_8_1_3_7


def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ , lowercase__ ) -> float:
    '''simple docstring'''
    __lowercase= (AXIS_A - AXIS_B) / AXIS_A

    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    __lowercase= atan((1 - flattening) * tan(radians(lowercase__ ) ) )
    __lowercase= atan((1 - flattening) * tan(radians(lowercase__ ) ) )

    # Compute central angle between two points
    # using haversine theta. sigma = haversine_distance / equatorial radius
    __lowercase= haversine_distance(lowercase__ , lowercase__ , lowercase__ , lowercase__ ) / EQUATORIAL_RADIUS

    # Intermediate P and Q values (means of the two parametric latitudes)
    __lowercase= (b_lata + b_latb) / 2
    __lowercase= (b_lata - b_latb) / 2

    # Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
    __lowercase= (sin(p_value ) ** 2) * (cos(q_value ) ** 2)
    __lowercase= cos(sigma / 2 ) ** 2
    __lowercase= (sigma - sin(sigma )) * (x_numerator / x_denominator)

    # Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
    __lowercase= (cos(p_value ) ** 2) * (sin(q_value ) ** 2)
    __lowercase= sin(sigma / 2 ) ** 2
    __lowercase= (sigma + sin(sigma )) * (y_numerator / y_denominator)

    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
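# ---------------------------------------------------------------------------
# Hedged usage sketch for the Lambert correction above. The public name of the
# function (anonymized to `_lowerCamelCase` here) is assumed to be
# `lamberts_ellipsoidal_distance`, as in the upstream algorithms collection;
# the coordinates are illustrative (approximately San Francisco and Yosemite):
#
#     meters = lamberts_ellipsoidal_distance(37.774856, -122.424227, 37.864742, -119.537521)
#
# The result should differ from the plain haversine distance by only a
# fraction of a percent, since the flattening correction is of order
# f ~ 1/298.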
import os

import numpy
import onnx


def _lowerCamelCase( lowercase__ , lowercase__ ) -> Union[str, Any]:
    '''simple docstring'''
    __lowercase= a.name
    __lowercase= b.name
    __lowercase= ''
    __lowercase= ''
    __lowercase= a == b
    __lowercase= name_a
    __lowercase= name_b
    return res


def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ ) -> Union[str, Any]:
    '''simple docstring'''
    for i, input_name in enumerate(node_proto.input ):
        if input_name == name:
            node_proto.input.insert(lowercase__ , lowercase__ )
            node_proto.input.pop(i + 1 )

    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g , lowercase__ , lowercase__ )
        _graph_replace_input_with(node_proto.attribute[1].g , lowercase__ , lowercase__ )
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g , lowercase__ , lowercase__ )


def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ ) -> str:
    '''simple docstring'''
    for n in graph_proto.node:
        _node_replace_input_with(lowercase__ , lowercase__ , lowercase__ )


def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ ) -> Any:
    '''simple docstring'''
    __lowercase= list(model.graph.initializer )
    __lowercase= list(model_without_ext.graph.initializer )
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i

        __lowercase= inits[i].name
        __lowercase= inits[ref_i].name
        model_without_ext.graph.initializer.remove(inits[i] )

        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph , lowercase__ , lowercase__ )


def _lowerCamelCase( lowercase__ ) -> Dict:
    '''simple docstring'''
    __lowercase= os.path.dirname(lowercase__ )
    __lowercase= os.path.basename(lowercase__ )

    __lowercase= onnx.load(os.path.join(lowercase__ , lowercase__ ) )

    __lowercase= list(model.graph.initializer )

    __lowercase= set()
    __lowercase= {}
    __lowercase= []
    __lowercase= 0

    for i in range(len(lowercase__ ) ):
        if i in dup_set:
            continue

        for j in range(i + 1 , len(lowercase__ ) ):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i] , inits[j] ):
                dup_set.add(lowercase__ )
                dup_set.add(lowercase__ )

                __lowercase= inits[j].data_type
                __lowercase= numpy.prod(inits[j].dims )
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 1_1:
                    mem_size *= 8
                else:
                    print('unexpected data type: ' , lowercase__ )
                total_reduced_size += mem_size

                __lowercase= inits[i].name
                __lowercase= inits[j].name

                if name_i in dup_map:
                    dup_map[name_i].append(lowercase__ )
                else:
                    __lowercase= [name_j]
                ind_to_replace.append((j, i) )

    print('total reduced size: ' , total_reduced_size / 1_0_2_4 / 1_0_2_4 / 1_0_2_4 , 'GB' )

    __lowercase= sorted(lowercase__ )
    _remove_dup_initializers_from_model(lowercase__ , lowercase__ , lowercase__ )

    __lowercase= 'optimized_' + model_file_name
    __lowercase= os.path.join(lowercase__ , lowercase__ )
    onnx.save(lowercase__ , lowercase__ )

    return new_model
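# ---------------------------------------------------------------------------
# The deduplication above hinges on one trick: two initializer TensorProtos
# count as duplicates iff they compare equal after their (necessarily
# different) names are blanked out. A self-contained sketch of that
# comparison, assuming only the `onnx` and `numpy` packages:
import numpy as np
from onnx import numpy_helper

a = numpy_helper.from_array(np.ones((2, 2), dtype=np.float32), name="weight_a")
b = numpy_helper.from_array(np.ones((2, 2), dtype=np.float32), name="weight_b")
name_a, name_b = a.name, b.name
a.name = b.name = ""   # the name is the only difference we allow
print(a == b)          # True: same dtype, dims and raw data
a.name, b.name = name_a, name_b  # restore, as the helper above does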
import os

import pytest
from attr import dataclass


lowerCAmelCase = '''us-east-1'''  # defaults region


@dataclass
class A :
    UpperCamelCase_ : str
    UpperCamelCase_ : str ='''arn:aws:iam::558105141721:role/sagemaker_execution_role'''
    UpperCamelCase_ : str ={
        '''task_name''': '''mnli''',
        '''per_device_train_batch_size''': 16,
        '''per_device_eval_batch_size''': 16,
        '''do_train''': True,
        '''do_eval''': True,
        '''do_predict''': True,
        '''output_dir''': '''/opt/ml/model''',
        '''overwrite_output_dir''': True,
        '''max_steps''': 500,
        '''save_steps''': 5_500,
    }
    UpperCamelCase_ : Any ={**hyperparameters, '''max_steps''': 1_000}

    @property
    def _A (self ):
        if self.framework == "pytorch":
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
                {"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
            ]
        else:
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
                {"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
            ]

    @property
    def _A (self ):
        return f'{self.framework}-transformers-test'

    @property
    def _A (self ):
        return f'./tests/sagemaker/scripts/{self.framework}'

    @property
    def _A (self ):
        if self.framework == "pytorch":
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
        else:
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"


@pytest.fixture(scope='class' )
def _lowerCamelCase( lowercase__ ) -> List[str]:
    '''simple docstring'''
    __lowercase= SageMakerTestEnvironment(framework=request.cls.framework )
import argparse
import importlib
from pathlib import Path


# Test all the extensions added in the setup
lowerCAmelCase = [
    '''kernels/rwkv/wkv_cuda.cu''',
    '''kernels/rwkv/wkv_op.cpp''',
    '''kernels/deformable_detr/ms_deform_attn.h''',
    '''kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh''',
    '''models/graphormer/algos_graphormer.pyx''',
]


def _lowerCamelCase( lowercase__ ) -> str:
    '''simple docstring'''
    for file in FILES_TO_FIND:
        if not (transformers_path / file).exists():
            return False
    return True


if __name__ == "__main__":
    lowerCAmelCase = argparse.ArgumentParser()
    parser.add_argument('''--check_lib''', action='''store_true''', help='''Whether to check the build or the actual package.''')
    lowerCAmelCase = parser.parse_args()

    if args.check_lib:
        lowerCAmelCase = importlib.import_module('''transformers''')
        lowerCAmelCase = Path(transformers_module.__file__).parent
    else:
        lowerCAmelCase = Path.cwd() / '''build/lib/transformers'''

    if not test_custom_files_are_present(transformers_path):
        raise ValueError('''The built release does not contain the custom files. Fix this before going further!''')
import logging import os import sys from dataclasses import dataclass, field from typing import Optional import torch from datasets import load_dataset from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor from torchvision.transforms.functional import InterpolationMode import transformers from transformers import ( HfArgumentParser, Trainer, TrainingArguments, ViTImageProcessor, ViTMAEConfig, ViTMAEForPreTraining, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version lowerCAmelCase = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version('''4.31.0''') require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt''') @dataclass class A : UpperCamelCase_ : Optional[str] =field( default='''cifar10''' , metadata={'''help''': '''Name of a dataset from the datasets package'''} ) UpperCamelCase_ : Optional[str] =field( default=A_ , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} ) UpperCamelCase_ : Optional[str] =field( default=A_ , metadata={'''help''': '''The column name of the images in the files.'''} ) UpperCamelCase_ : Optional[str] =field(default=A_ , metadata={'''help''': '''A folder containing the training data.'''} ) UpperCamelCase_ : Optional[str] =field(default=A_ , metadata={'''help''': '''A folder containing the validation data.'''} ) UpperCamelCase_ : Optional[float] =field( default=0.15 , metadata={'''help''': '''Percent to split off of train for validation.'''} ) UpperCamelCase_ : Optional[int] =field( default=A_ , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of training examples to this ''' '''value if set.''' ) } , ) UpperCamelCase_ : Optional[int] =field( default=A_ , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of evaluation examples to this ''' '''value if set.''' ) } , ) def _A (self ): __lowercase= {} if self.train_dir is not None: __lowercase= self.train_dir if self.validation_dir is not None: __lowercase= self.validation_dir __lowercase= data_files if data_files else None @dataclass class A : UpperCamelCase_ : str =field( default=A_ , metadata={ '''help''': ( '''The model checkpoint for weights initialization.Don\'t set if you want to train a model from scratch.''' ) } , ) UpperCamelCase_ : Optional[str] =field( default=A_ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name_or_path'''} ) UpperCamelCase_ : Optional[str] =field( default=A_ , metadata={ '''help''': ( '''Override some existing default config settings when a model is trained from scratch. 
Example: ''' '''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index''' ) } , ) UpperCamelCase_ : Optional[str] =field( default=A_ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from s3'''} ) UpperCamelCase_ : str =field( default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , ) UpperCamelCase_ : str =field(default=A_ , metadata={'''help''': '''Name or path of preprocessor config.'''} ) UpperCamelCase_ : bool =field( default=A_ , metadata={ '''help''': ( '''Will use the token generated when running `huggingface-cli login` (necessary to use this script ''' '''with private models).''' ) } , ) UpperCamelCase_ : float =field( default=0.75 , metadata={'''help''': '''The ratio of the number of masked tokens in the input sequence.'''} ) UpperCamelCase_ : bool =field( default=A_ , metadata={'''help''': '''Whether or not to train with normalized pixel values as target.'''} ) @dataclass class A ( A_ ): UpperCamelCase_ : float =field( default=1e-3 , metadata={'''help''': '''Base learning rate: absolute_lr = base_lr * total_batch_size / 256.'''} ) def _lowerCamelCase( lowercase__ ) -> List[str]: '''simple docstring''' __lowercase= torch.stack([example['pixel_values'] for example in examples] ) return {"pixel_values": pixel_values} def _lowerCamelCase( ) -> List[Any]: '''simple docstring''' __lowercase= HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. __lowercase, __lowercase, __lowercase= parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: __lowercase, __lowercase, __lowercase= parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry('run_mae' , lowercase__ , lowercase__ ) # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() __lowercase= training_args.get_process_log_level() logger.setLevel(lowercase__ ) transformers.utils.logging.set_verbosity(lowercase__ ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}' + F'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' ) logger.info(F'Training/evaluation parameters {training_args}' ) # Detecting last checkpoint. __lowercase= None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: __lowercase= get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F'Output directory ({training_args.output_dir}) already exists and is not empty. ' 'Use --overwrite_output_dir to overcome.' 
) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ' 'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' ) # Initialize our dataset. __lowercase= load_dataset( data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) # If we don't have a validation split, split off a percentage of train as validation. __lowercase= None if 'validation' in ds.keys() else data_args.train_val_split if isinstance(data_args.train_val_split , lowercase__ ) and data_args.train_val_split > 0.0: __lowercase= ds['train'].train_test_split(data_args.train_val_split ) __lowercase= split['train'] __lowercase= split['test'] # Load pretrained model and image processor # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. __lowercase= { 'cache_dir': model_args.cache_dir, 'revision': model_args.model_revision, 'use_auth_token': True if model_args.use_auth_token else None, } if model_args.config_name: __lowercase= ViTMAEConfig.from_pretrained(model_args.config_name , **lowercase__ ) elif model_args.model_name_or_path: __lowercase= ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **lowercase__ ) else: __lowercase= ViTMAEConfig() logger.warning('You are instantiating a new config instance from scratch.' ) if model_args.config_overrides is not None: logger.info(F'Overriding config: {model_args.config_overrides}' ) config.update_from_string(model_args.config_overrides ) logger.info(F'New config: {config}' ) # adapt config config.update( { 'mask_ratio': model_args.mask_ratio, 'norm_pix_loss': model_args.norm_pix_loss, } ) # create image processor if model_args.image_processor_name: __lowercase= ViTImageProcessor.from_pretrained(model_args.image_processor_name , **lowercase__ ) elif model_args.model_name_or_path: __lowercase= ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **lowercase__ ) else: __lowercase= ViTImageProcessor() # create model if model_args.model_name_or_path: __lowercase= ViTMAEForPreTraining.from_pretrained( model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=lowercase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) else: logger.info('Training new model from scratch' ) __lowercase= ViTMAEForPreTraining(lowercase__ ) if training_args.do_train: __lowercase= ds['train'].column_names else: __lowercase= ds['validation'].column_names if data_args.image_column_name is not None: __lowercase= data_args.image_column_name elif "image" in column_names: __lowercase= 'image' elif "img" in column_names: __lowercase= 'img' else: __lowercase= column_names[0] # transformations as done in original MAE paper # source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py if "shortest_edge" in image_processor.size: __lowercase= image_processor.size['shortest_edge'] else: __lowercase= (image_processor.size['height'], image_processor.size['width']) __lowercase= Compose( [ Lambda(lambda lowercase__ : img.convert('RGB' ) if img.mode != "RGB" else img ), RandomResizedCrop(lowercase__ , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ), RandomHorizontalFlip(), ToTensor(), 
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ), ] ) def preprocess_images(lowercase__ ): __lowercase= [transforms(lowercase__ ) for image in examples[image_column_name]] return examples if training_args.do_train: if "train" not in ds: raise ValueError('--do_train requires a train dataset' ) if data_args.max_train_samples is not None: __lowercase= ds['train'].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) # Set the training transforms ds["train"].set_transform(lowercase__ ) if training_args.do_eval: if "validation" not in ds: raise ValueError('--do_eval requires a validation dataset' ) if data_args.max_eval_samples is not None: __lowercase= ( ds['validation'].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms ds["validation"].set_transform(lowercase__ ) # Compute absolute learning rate __lowercase= ( training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size ) if training_args.base_learning_rate is not None: __lowercase= training_args.base_learning_rate * total_train_batch_size / 2_5_6 # Initialize our trainer __lowercase= Trainer( model=lowercase__ , args=lowercase__ , train_dataset=ds['train'] if training_args.do_train else None , eval_dataset=ds['validation'] if training_args.do_eval else None , tokenizer=lowercase__ , data_collator=lowercase__ , ) # Training if training_args.do_train: __lowercase= None if training_args.resume_from_checkpoint is not None: __lowercase= training_args.resume_from_checkpoint elif last_checkpoint is not None: __lowercase= last_checkpoint __lowercase= trainer.train(resume_from_checkpoint=lowercase__ ) trainer.save_model() trainer.log_metrics('train' , train_result.metrics ) trainer.save_metrics('train' , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: __lowercase= trainer.evaluate() trainer.log_metrics('eval' , lowercase__ ) trainer.save_metrics('eval' , lowercase__ ) # Write model card and (optionally) push to hub __lowercase= { 'tasks': 'masked-auto-encoding', 'dataset': data_args.dataset_name, 'tags': ['masked-auto-encoding'], } if training_args.push_to_hub: trainer.push_to_hub(**lowercase__ ) else: trainer.create_model_card(**lowercase__ ) def _lowerCamelCase( lowercase__ ) -> Dict: '''simple docstring''' main() if __name__ == "__main__": main()
from __future__ import annotations


def _lowerCamelCase( lowercase__ , lowercase__ ) -> Any:
    '''simple docstring'''
    if len(lowercase__ ) <= 1 or n <= 1:
        return

    insert_next(lowercase__ , n - 1 )
    rec_insertion_sort(lowercase__ , n - 1 )


def _lowerCamelCase( lowercase__ , lowercase__ ) -> Any:
    '''simple docstring'''
    if index >= len(lowercase__ ) or collection[index - 1] <= collection[index]:
        return

    # Swaps adjacent elements since they are not in ascending order
    __lowercase, __lowercase= (
        collection[index],
        collection[index - 1],
    )
    insert_next(lowercase__ , index + 1 )


if __name__ == "__main__":
    lowerCAmelCase = input('''Enter integers separated by spaces: ''')
    lowerCAmelCase = [int(num) for num in numbers.split()]
    rec_insertion_sort(number_list, len(number_list))
    print(number_list)
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


lowerCAmelCase = logging.get_logger(__name__)

lowerCAmelCase = {
    '''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/config.json''',
    '''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/config.json''',
    '''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/config.json''',
    '''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/config.json''',
    '''bert-base-multilingual-uncased''': '''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json''',
    '''bert-base-multilingual-cased''': '''https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json''',
    '''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/config.json''',
    '''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/config.json''',
    '''bert-large-uncased-whole-word-masking''': (
        '''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json'''
    ),
    '''bert-large-cased-whole-word-masking''': (
        '''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json'''
    ),
    '''bert-large-uncased-whole-word-masking-finetuned-squad''': (
        '''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json'''
    ),
    '''bert-large-cased-whole-word-masking-finetuned-squad''': (
        '''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json'''
    ),
    '''bert-base-cased-finetuned-mrpc''': '''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json''',
    '''bert-base-german-dbmdz-cased''': '''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json''',
    '''bert-base-german-dbmdz-uncased''': '''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json''',
    '''cl-tohoku/bert-base-japanese''': '''https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json''',
    '''cl-tohoku/bert-base-japanese-whole-word-masking''': (
        '''https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json'''
    ),
    '''cl-tohoku/bert-base-japanese-char''': (
        '''https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json'''
    ),
    '''cl-tohoku/bert-base-japanese-char-whole-word-masking''': (
        '''https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json'''
    ),
    '''TurkuNLP/bert-base-finnish-cased-v1''': (
        '''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json'''
    ),
    '''TurkuNLP/bert-base-finnish-uncased-v1''': (
        '''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json'''
    ),
    '''wietsedv/bert-base-dutch-cased''': '''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json''',
    # See all BERT models at https://huggingface.co/models?filter=bert
}


class A ( A_ ):
    UpperCamelCase_ : Dict ='''bert'''

    def __init__(self , lowerCAmelCase=3_0_5_2_2 , lowerCAmelCase=7_6_8 , lowerCAmelCase=1_2 , lowerCAmelCase=1_2 , lowerCAmelCase=3_0_7_2 , lowerCAmelCase="gelu" , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=5_1_2 , lowerCAmelCase=2 , lowerCAmelCase=0.02 , lowerCAmelCase=1E-12 , lowerCAmelCase=0 , lowerCAmelCase="absolute" , lowerCAmelCase=True , lowerCAmelCase=None , **lowerCAmelCase , ):
        super().__init__(pad_token_id=lowerCAmelCase , **lowerCAmelCase )

        __lowercase= vocab_size
        __lowercase= hidden_size
        __lowercase= num_hidden_layers
        __lowercase= num_attention_heads
        __lowercase= hidden_act
        __lowercase= intermediate_size
        __lowercase= hidden_dropout_prob
        __lowercase= attention_probs_dropout_prob
        __lowercase= max_position_embeddings
        __lowercase= type_vocab_size
        __lowercase= initializer_range
        __lowercase= layer_norm_eps
        __lowercase= position_embedding_type
        __lowercase= use_cache
        __lowercase= classifier_dropout


class A ( A_ ):
    @property
    def _A (self ):
        if self.task == "multiple-choice":
            __lowercase= {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            __lowercase= {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
                ('token_type_ids', dynamic_axis),
            ] )
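# ---------------------------------------------------------------------------
# Hedged usage sketch for the configuration class above, assuming the public
# `transformers.BertConfig` / `BertModel` API that this file backs. The tiny
# hyperparameters are illustrative, not a released checkpoint.
from transformers import BertConfig, BertModel

config = BertConfig(hidden_size=128, num_hidden_layers=2, num_attention_heads=2, intermediate_size=256)
model = BertModel(config)                # randomly initialized, tiny BERT
print(config.position_embedding_type)    # 'absolute' by default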
def _lowerCamelCase( lowercase__ , lowercase__ = " " ) -> list:
    '''simple docstring'''
    __lowercase= []
    __lowercase= 0
    for index, char in enumerate(lowercase__ ):
        if char == separator:
            split_words.append(string[last_index:index] )
            __lowercase= index + 1
        elif index + 1 == len(lowercase__ ):
            split_words.append(string[last_index : index + 1] )
    return split_words


if __name__ == "__main__":
    from doctest import testmod

    testmod()
import json import os import unittest from transformers import DebertaTokenizer, DebertaTokenizerFast from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES from transformers.testing_utils import slow from ...test_tokenization_common import TokenizerTesterMixin class A ( A_ , unittest.TestCase ): UpperCamelCase_ : List[Any] =DebertaTokenizer UpperCamelCase_ : List[Any] =True UpperCamelCase_ : Optional[Any] =DebertaTokenizerFast def _A (self ): super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt __lowercase= [ 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', '\u0120', '\u0120l', '\u0120n', '\u0120lo', '\u0120low', 'er', '\u0120lowest', '\u0120newer', '\u0120wider', '[UNK]', ] __lowercase= dict(zip(lowerCAmelCase , range(len(lowerCAmelCase ) ) ) ) __lowercase= ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', ''] __lowercase= {'unk_token': '[UNK]'} __lowercase= os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) __lowercase= os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp: fp.write(json.dumps(lowerCAmelCase ) + '\n' ) with open(self.merges_file , 'w' , encoding='utf-8' ) as fp: fp.write('\n'.join(lowerCAmelCase ) ) def _A (self , **lowerCAmelCase ): kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase ) def _A (self , lowerCAmelCase ): __lowercase= 'lower newer' __lowercase= 'lower newer' return input_text, output_text def _A (self ): __lowercase= self.get_tokenizer() __lowercase= 'lower newer' __lowercase= ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er'] __lowercase= tokenizer.tokenize(lowerCAmelCase ) self.assertListEqual(lowerCAmelCase , lowerCAmelCase ) __lowercase= tokens + [tokenizer.unk_token] __lowercase= [0, 1, 2, 1_5, 1_0, 9, 3, 2, 1_5, 1_9] self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase ) , lowerCAmelCase ) def _A (self ): __lowercase= self.get_tokenizer() __lowercase= tokenizer('Hello' , 'World' ) __lowercase= [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1] self.assertListEqual(tokd['token_type_ids'] , lowerCAmelCase ) @slow def _A (self ): __lowercase= self.tokenizer_class.from_pretrained('microsoft/deberta-base' ) __lowercase= tokenizer.encode('sequence builders' , add_special_tokens=lowerCAmelCase ) __lowercase= tokenizer.encode('multi-sequence build' , add_special_tokens=lowerCAmelCase ) __lowercase= tokenizer.encode( 'sequence builders' , add_special_tokens=lowerCAmelCase , add_prefix_space=lowerCAmelCase ) __lowercase= tokenizer.encode( 'sequence builders' , 'multi-sequence build' , add_special_tokens=lowerCAmelCase , add_prefix_space=lowerCAmelCase ) __lowercase= tokenizer.build_inputs_with_special_tokens(lowerCAmelCase ) __lowercase= tokenizer.build_inputs_with_special_tokens(lowerCAmelCase , lowerCAmelCase ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode @slow def _A (self ): __lowercase= [self.tokenizer_class] if self.test_rust_tokenizer: tokenizer_classes.append(self.rust_tokenizer_class ) for tokenizer_class in tokenizer_classes: __lowercase= tokenizer_class.from_pretrained('microsoft/deberta-base' ) __lowercase= [ 'ALBERT: A Lite BERT for Self-supervised Learning of Language Representations', 'ALBERT incorporates two parameter reduction techniques', 'The first one is a factorized embedding parameterization. 
By decomposing the large vocabulary' ' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of' ' vocabulary embedding.', ] __lowercase= tokenizer(lowerCAmelCase , padding=lowerCAmelCase ) __lowercase= [tokenizer.decode(lowerCAmelCase , skip_special_tokens=lowerCAmelCase ) for seq in encoding['input_ids']] # fmt: off __lowercase= { 'input_ids': [ [1, 2_1_1_8, 1_1_1_2_6, 5_6_5, 3_5, 8_3, 2_5_1_9_1, 1_6_3, 1_8_8_5_4, 1_3, 1_2_1_5_6, 1_2, 1_6_1_0_1, 2_5_3_7_6, 1_3_8_0_7, 9, 2_2_2_0_5, 2_7_8_9_3, 1_6_3_5, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 2_1_1_8, 1_1_1_2_6, 5_6_5, 2_4_5_3_6, 8_0, 4_3_7_9_7, 4_8_7_8, 7_3_7_3, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1_3_3, 7_8, 6_5, 1_6, 1_0, 3_7_2_4, 1_5_3_8, 3_3_1_8_3, 1_1_3_0_3, 4_3_7_9_7, 1_9_3_8, 4, 8_7_0, 2_4_1_6_5, 2_9_1_0_5, 5, 7_3_9, 3_2_6_4_4, 3_3_1_8_3, 1_1_3_0_3, 3_6_1_7_3, 8_8, 8_0, 6_5_0, 7_8_2_1, 4_5_9_4_0, 6, 5_2, 2_5_5_9, 5, 1_8_3_6, 9, 5, 7_3_9_7, 1_3_1_7_1, 3_1, 5, 1_8_3_6, 9, 3_2_6_4_4, 3_3_1_8_3, 1_1_3_0_3, 4, 2] ], 'token_type_ids': [ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] ], 'attention_mask': [ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] ] } # fmt: on __lowercase= [ 'ALBERT: A Lite BERT for Self-supervised Learning of Language Representations', 'ALBERT incorporates two parameter reduction techniques', 'The first one is a factorized embedding parameterization. By decomposing the large vocabulary' ' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of' ' vocabulary embedding.', ] self.assertDictEqual(encoding.data , lowerCAmelCase ) for expected, decoded in zip(lowerCAmelCase , lowerCAmelCase ): self.assertEqual(lowerCAmelCase , lowerCAmelCase )
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional

import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter

from transformers import HfArgumentParser


def _lowerCamelCase( lowercase__=None , lowercase__=None ) -> Dict:
    '''simple docstring'''
    return field(default_factory=lambda: default , metadata=lowercase__ )


@dataclass
class A :
    UpperCamelCase_ : str =field(
        metadata={'''help''': '''The csv file to plot.'''} , )
    UpperCamelCase_ : bool =field(
        default=A_ , metadata={'''help''': '''Whether to plot along batch size or sequence length. Defaults to sequence length.'''} , )
    UpperCamelCase_ : bool =field(
        default=A_ , metadata={'''help''': '''Whether the csv file has time results or memory results. Defaults to memory results.'''} , )
    UpperCamelCase_ : bool =field(
        default=A_ , metadata={'''help''': '''Disable logarithmic scale when plotting'''} , )
    UpperCamelCase_ : bool =field(
        default=A_ , metadata={
            '''help''': '''Whether the csv file has training results or inference results. Defaults to inference results.'''
        } , )
    UpperCamelCase_ : Optional[str] =field(
        default=A_ , metadata={'''help''': '''Filename under which the plot will be saved. If unused no plot is saved.'''} , )
    UpperCamelCase_ : Optional[List[str]] =list_field(
        default=A_ , metadata={'''help''': '''List of model names that are used instead of the ones in the csv file.'''} )


def _lowerCamelCase( lowercase__ ) -> int:
    '''simple docstring'''
    try:
        int(lowercase__ )
        return True
    except ValueError:
        return False


def _lowerCamelCase( lowercase__ ) -> int:
    '''simple docstring'''
    try:
        float(lowercase__ )
        return True
    except ValueError:
        return False


class A :
    def __init__(self , lowerCAmelCase ):
        __lowercase= args
        __lowercase= defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}} )

        with open(self.args.csv_file , newline='' ) as csv_file:
            __lowercase= csv.DictReader(lowerCAmelCase )
            for row in reader:
                __lowercase= row['model']
                self.result_dict[model_name]["bsz"].append(int(row['batch_size'] ) )
                self.result_dict[model_name]["seq_len"].append(int(row['sequence_length'] ) )
                if can_convert_to_int(row['result'] ):
                    # value is not None
                    __lowercase= int(row['result'] )
                elif can_convert_to_float(row['result'] ):
                    # value is not None
                    __lowercase= float(row['result'] )

    def _A (self ):
        __lowercase, __lowercase= plt.subplots()
        __lowercase= 'Time usage' if self.args.is_time else 'Memory usage'
        __lowercase= title_str + ' for training' if self.args.is_train else title_str + ' for inference'

        if not self.args.no_log_scale:
            # set logarithm scales
            ax.set_xscale('log' )
            ax.set_yscale('log' )

        for axis in [ax.xaxis, ax.yaxis]:
            axis.set_major_formatter(ScalarFormatter() )

        for model_name_idx, model_name in enumerate(self.result_dict.keys() ):
            __lowercase= sorted(set(self.result_dict[model_name]['bsz'] ) )
            __lowercase= sorted(set(self.result_dict[model_name]['seq_len'] ) )
            __lowercase= self.result_dict[model_name]['result']

            ((__lowercase), (__lowercase))= (
                (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
            )

            __lowercase= (
                model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
            )

            for inner_loop_value in inner_loop_array:
                if self.args.plot_along_batch:
                    __lowercase= np.asarray(
                        [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results] , dtype=lowerCAmelCase , )
                else:
                    __lowercase= np.asarray(
                        [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results] , dtype=np.float32 , )

                ((__lowercase), (__lowercase))= (
                    ('batch_size', 'len') if self.args.plot_along_batch else ('in #tokens', 'bsz')
                )

                __lowercase= np.asarray(lowerCAmelCase , lowerCAmelCase )[: len(lowerCAmelCase )]
                plt.scatter(
                    lowerCAmelCase , lowerCAmelCase , label=f'{label_model_name} - {inner_loop_label}: {inner_loop_value}' )
                plt.plot(lowerCAmelCase , lowerCAmelCase , '--' )

                title_str += f' {label_model_name} vs.'

        __lowercase= title_str[:-4]
        __lowercase= 'Time in s' if self.args.is_time else 'Memory in MB'

        # plot
        plt.title(lowerCAmelCase )
        plt.xlabel(lowerCAmelCase )
        plt.ylabel(lowerCAmelCase )
        plt.legend()

        if self.args.figure_png_file is not None:
            plt.savefig(self.args.figure_png_file )
        else:
            plt.show()


def _lowerCamelCase( ) -> str:
    '''simple docstring'''
    __lowercase= HfArgumentParser(lowercase__ )
    __lowercase= parser.parse_args_into_dataclasses()[0]
    __lowercase= Plot(args=lowercase__ )
    plot.plot()


if __name__ == "__main__":
    main()
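# ---------------------------------------------------------------------------
# The plotter above expects a benchmark CSV with `model`, `batch_size`,
# `sequence_length` and `result` columns (see the DictReader loop). A hedged
# sketch that writes a minimal input file for it; the values and the upstream
# script name (`plot_csv_file.py`) are assumptions:
import csv

rows = [
    {"model": "bert-base", "batch_size": 8, "sequence_length": 128, "result": 412},
    {"model": "bert-base", "batch_size": 8, "sequence_length": 512, "result": 1604},
]
with open("benchmark.csv", "w", newline="") as f:
    writer = csv.DictWriter(f, fieldnames=["model", "batch_size", "sequence_length", "result"])
    writer.writeheader()
    writer.writerows(rows)
# then: python plot_csv_file.py --csv_file benchmark.csv --figure_png_file plot.png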
import collections import json import os import re from typing import TYPE_CHECKING, List, Optional, Tuple import numpy as np from ...tokenization_utils_fast import PreTrainedTokenizer from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation lowerCAmelCase = logging.get_logger(__name__) lowerCAmelCase = {'''vocab_file''': '''vocab.txt''', '''emoji_file''': '''emoji.json'''} lowerCAmelCase = { '''vocab_file''': { '''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt''', }, '''emoji_file''': { '''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json''', }, } lowerCAmelCase = { '''abeja/gpt-neox-japanese-2.7b''': 2_0_4_8, } def _lowerCamelCase( lowercase__ , lowercase__ ) -> str: '''simple docstring''' with open(lowercase__ , 'r' , encoding='utf-8' ) as f: __lowercase= json.loads(f.read() ) __lowercase= collections.OrderedDict() __lowercase= collections.OrderedDict() __lowercase= collections.OrderedDict() with open(lowercase__ , 'r' , encoding='utf-8' ) as f: __lowercase= f.readlines() __lowercase= [[t.rstrip('\n' )] if (t == ',' or ',' not in t) else t.rstrip('\n' ).split(',' ) for t in token] for idx, b in enumerate(lowercase__ ): __lowercase= b __lowercase= idx for wd in b: __lowercase= idx return vocab, raw_vocab, ids_to_tokens, emoji class A ( A_ ): UpperCamelCase_ : Tuple =VOCAB_FILES_NAMES UpperCamelCase_ : str =PRETRAINED_VOCAB_FILES_MAP UpperCamelCase_ : Optional[Any] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase_ : str =['''input_ids''', '''attention_mask'''] def __init__(self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase="<|endoftext|>" , lowerCAmelCase="<|endoftext|>" , lowerCAmelCase="<|startoftext|>" , lowerCAmelCase="<|endoftext|>" , lowerCAmelCase=False , **lowerCAmelCase , ): super().__init__( unk_token=lowerCAmelCase , pad_token=lowerCAmelCase , bos_token=lowerCAmelCase , eos_token=lowerCAmelCase , do_clean_text=lowerCAmelCase , **lowerCAmelCase , ) if not os.path.isfile(lowerCAmelCase ): raise ValueError( f'Can\'t find a vocabulary file at path \'{vocab_file}\'. To load the vocabulary from a Google pretrained' ' model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`' ) if not os.path.isfile(lowerCAmelCase ): raise ValueError( f'Can\'t find a emoji file at path \'{emoji_file}\'. 
To load the emoji information from a Google' ' pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`' ) __lowercase= do_clean_text __lowercase, __lowercase, __lowercase, __lowercase= load_vocab_and_emoji(lowerCAmelCase , lowerCAmelCase ) __lowercase= SubWordJapaneseTokenizer( vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji ) @property def _A (self ): # self.vocab contains support for character fluctuation unique to Japanese, and has a large number of vocab return len(self.raw_vocab ) def _A (self ): return dict(self.raw_vocab , **self.added_tokens_encoder ) def _A (self , lowerCAmelCase ): return self.subword_tokenizer.tokenize(lowerCAmelCase , clean=self.do_clean_text ) def _A (self , lowerCAmelCase ): return self.vocab.get(lowerCAmelCase , self.vocab.get(self.unk_token ) ) def _A (self , lowerCAmelCase ): return self.subword_tokenizer.convert_id_to_token(lowerCAmelCase ) def _A (self , lowerCAmelCase ): __lowercase= ''.join(lowerCAmelCase ).strip() return out_string def _A (self , lowerCAmelCase ): __lowercase= [] for is_user, text in conversation.iter_texts(): input_ids.extend(self.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase ) + [self.eos_token_id] ) if len(lowerCAmelCase ) > self.model_max_length: __lowercase= input_ids[-self.model_max_length :] return input_ids def _A (self , lowerCAmelCase , lowerCAmelCase = None ): __lowercase= 0 if os.path.isdir(lowerCAmelCase ): __lowercase= os.path.join( lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) __lowercase= os.path.join( lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['emoji_file'] ) else: __lowercase= ( (filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['vocab_file'] ) __lowercase= ( (filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['emoji_file'] ) with open(lowerCAmelCase , 'w' , encoding='utf-8' ) as writer: for token_index, token in self.ids_to_tokens.items(): if index != token_index: logger.warning( f'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.' ' Please check that the vocabulary is not corrupted!' 
) __lowercase= token_index writer.write(','.join(lowerCAmelCase ) + '\n' ) index += 1 with open(lowerCAmelCase , 'w' , encoding='utf-8' ) as writer: json.dump(self.emoji , lowerCAmelCase ) return vocab_file, emoji_file class A ( A_ ): def __init__(self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): __lowercase= vocab # same as swe __lowercase= ids_to_tokens # same as bpe __lowercase= emoji __lowercase= np.max([len(lowerCAmelCase ) for w in self.vocab.keys()] ) __lowercase= re.compile(r'(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)' ) __lowercase= re.compile(r'[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*' ) __lowercase= re.compile(r'[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}' ) __lowercase= re.compile( r'([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*' ) __lowercase= re.compile( r'(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*' ) __lowercase= re.compile( r'((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*' ) __lowercase= '─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿' __lowercase= '▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟' __lowercase= str.maketrans({k: '<BLOCK>' for k in keisen + blocks} ) def __len__(self ): return len(self.ids_to_tokens ) def _A (self , lowerCAmelCase ): __lowercase= self.content_repattera.sub('<URL>' , lowerCAmelCase ) __lowercase= self.content_repattera.sub('<EMAIL>' , lowerCAmelCase ) __lowercase= self.content_repattera.sub('<TEL>' , lowerCAmelCase ) __lowercase= self.content_repattera.sub('<DATE>' , lowerCAmelCase ) __lowercase= self.content_repattera.sub('<DATE>' , lowerCAmelCase ) __lowercase= self.content_repattera.sub('<PRICE>' , lowerCAmelCase ) __lowercase= content.translate(self.content_transa ) while "<BLOCK><BLOCK>" in content: __lowercase= content.replace('<BLOCK><BLOCK>' , '<BLOCK>' ) return content def _A (self , lowerCAmelCase , lowerCAmelCase=False ): __lowercase= text.replace(' ' , '<SP>' ) __lowercase= text.replace(' ' , '<SP>' ) __lowercase= text.replace('\r\n' , '<BR>' ) __lowercase= text.replace('\n' , '<BR>' ) __lowercase= text.replace('\r' , '<BR>' ) __lowercase= text.replace('\t' , '<TAB>' ) __lowercase= text.replace('—' , 'ー' ) __lowercase= text.replace('−' , 'ー' ) for k, v in self.emoji["emoji"].items(): if k in text: __lowercase= text.replace(lowerCAmelCase , lowerCAmelCase ) if clean: __lowercase= self.clean_text(lowerCAmelCase ) def check_simbol(lowerCAmelCase ): __lowercase= x.encode() if len(lowerCAmelCase ) == 1 and len(lowerCAmelCase ) == 2: __lowercase= (int(e[0] ) << 8) + int(e[1] ) if ( (c >= 0XC2A1 and c <= 0XC2BF) or (c >= 0XC780 and c <= 0XC783) or (c >= 0XCAB9 and c <= 0XCBBF) or (c >= 0XCC80 and c <= 0XCDA2) ): return True return False def checkuae(lowerCAmelCase ): __lowercase= x.encode() if len(lowerCAmelCase ) == 1 and len(lowerCAmelCase ) == 3: __lowercase= (int(e[0] ) << 1_6) + (int(e[1] ) << 8) + int(e[2] ) if c >= 0XE28080 and c <= 0XE2B07F: return True return False __lowercase= 0 __lowercase= [] while pos < len(lowerCAmelCase ): __lowercase= min(len(lowerCAmelCase ) , pos + self.maxlen + 1 ) if text[pos] == 
'<' else pos + 3 __lowercase= [] # (token_id, token, pos) for e in range(lowerCAmelCase , lowerCAmelCase , -1 ): __lowercase= text[pos:e] if wd in self.vocab: if wd[0] == "<" and len(lowerCAmelCase ) > 2: __lowercase= [(self.vocab[wd], wd, e)] break else: candidates.append((self.vocab[wd], wd, e) ) if len(lowerCAmelCase ) > 0: # the smallest token_id is adopted __lowercase, __lowercase, __lowercase= sorted(lowerCAmelCase , key=lambda lowerCAmelCase : x[0] )[0] result.append(lowerCAmelCase ) __lowercase= e else: __lowercase= pos + 1 __lowercase= text[pos:end] if check_simbol(lowerCAmelCase ): result.append('<KIGOU>' ) elif checkuae(lowerCAmelCase ): result.append('<U2000U2BFF>' ) else: for i in wd.encode('utf-8' ): result.append('<|byte%d|>' % i ) __lowercase= end return result def _A (self , lowerCAmelCase , lowerCAmelCase="\n" ): __lowercase= [] __lowercase= [] __lowercase= self.ids_to_tokens[index][0] if word[:6] == "<|byte" and word[-2:] == "|>": byte_tokens.append(int(word[6:-2] ) ) else: if len(lowerCAmelCase ) > 0: words.append(bytearray(lowerCAmelCase ).decode('utf-8' , errors='replace' ) ) __lowercase= [] if word[:7] == "<|emoji" and word[-2:] == "|>": words.append(self.emoji['emoji_inv'][word] ) elif word == "<SP>": words.append(' ' ) elif word == "<BR>": words.append(lowerCAmelCase ) elif word == "<TAB>": words.append('\t' ) elif word == "<BLOCK>": words.append('▀' ) elif word == "<KIGOU>": words.append('ǀ' ) elif word == "<U2000U2BFF>": words.append('‖' ) else: words.append(lowerCAmelCase ) if len(lowerCAmelCase ) > 0: words.append(bytearray(lowerCAmelCase ).decode('utf-8' , errors='replace' ) ) __lowercase= ''.join(lowerCAmelCase ) return text
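# ---------------------------------------------------------------------------
# Hedged usage sketch for the Japanese subword tokenizer above, assuming the
# released `abeja/gpt-neox-japanese-2.7b` checkpoint referenced in this file's
# vocab map and the standard `AutoTokenizer` loader:
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("abeja/gpt-neox-japanese-2.7b")
ids = tokenizer("こんにちは、世界")["input_ids"]
print(tokenizer.decode(ids))  # round-trips through the emoji/URL-aware cleaner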
import collections from typing import List, Optional, Union from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging from ..bert.tokenization_bert_fast import BertTokenizerFast from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer lowerCAmelCase = logging.get_logger(__name__) lowerCAmelCase = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''} lowerCAmelCase = { '''vocab_file''': { '''facebook/dpr-ctx_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt''' ), '''facebook/dpr-ctx_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''facebook/dpr-ctx_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json''' ), '''facebook/dpr-ctx_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json''' ), }, } lowerCAmelCase = { '''vocab_file''': { '''facebook/dpr-question_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt''' ), '''facebook/dpr-question_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''facebook/dpr-question_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json''' ), '''facebook/dpr-question_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json''' ), }, } lowerCAmelCase = { '''vocab_file''': { '''facebook/dpr-reader-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt''' ), '''facebook/dpr-reader-multiset-base''': ( '''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''facebook/dpr-reader-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json''' ), '''facebook/dpr-reader-multiset-base''': ( '''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json''' ), }, } lowerCAmelCase = { '''facebook/dpr-ctx_encoder-single-nq-base''': 5_1_2, '''facebook/dpr-ctx_encoder-multiset-base''': 5_1_2, } lowerCAmelCase = { '''facebook/dpr-question_encoder-single-nq-base''': 5_1_2, '''facebook/dpr-question_encoder-multiset-base''': 5_1_2, } lowerCAmelCase = { '''facebook/dpr-reader-single-nq-base''': 5_1_2, '''facebook/dpr-reader-multiset-base''': 5_1_2, } lowerCAmelCase = { '''facebook/dpr-ctx_encoder-single-nq-base''': {'''do_lower_case''': True}, '''facebook/dpr-ctx_encoder-multiset-base''': {'''do_lower_case''': True}, } lowerCAmelCase = { '''facebook/dpr-question_encoder-single-nq-base''': {'''do_lower_case''': True}, '''facebook/dpr-question_encoder-multiset-base''': {'''do_lower_case''': True}, } lowerCAmelCase = { '''facebook/dpr-reader-single-nq-base''': {'''do_lower_case''': True}, '''facebook/dpr-reader-multiset-base''': {'''do_lower_case''': True}, } class A ( A_ ): UpperCamelCase_ : List[Any] =VOCAB_FILES_NAMES UpperCamelCase_ : Dict =CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP UpperCamelCase_ : List[Any] 
=CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase_ : Optional[int] =CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION UpperCamelCase_ : int =DPRContextEncoderTokenizer class A ( A_ ): UpperCamelCase_ : Any =VOCAB_FILES_NAMES UpperCamelCase_ : List[str] =QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP UpperCamelCase_ : Optional[Any] =QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase_ : Optional[Any] =QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION UpperCamelCase_ : List[Any] =DPRQuestionEncoderTokenizer lowerCAmelCase = collections.namedtuple( '''DPRSpanPrediction''', ['''span_score''', '''relevance_score''', '''doc_id''', '''start_index''', '''end_index''', '''text'''] ) lowerCAmelCase = collections.namedtuple('''DPRReaderOutput''', ['''start_logits''', '''end_logits''', '''relevance_logits''']) lowerCAmelCase = R''' Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`. It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers), using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)` with the format: [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids> Args: questions (`str` or `List[str]`): The questions to be encoded. You can specify one question for many passages. In this case, the question will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in `titles` or `texts`. titles (`str` or `List[str]`): The passages titles to be encoded. This can be a string or a list of strings if there are several passages. texts (`str` or `List[str]`): The passages texts to be encoded. This can be a string or a list of strings if there are several passages. padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`): Activates and controls padding. Accepts the following values: - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different lengths). truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`): Activates and controls truncation. Accepts the following values: - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided. - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided. 
- `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size). max_length (`int`, *optional*): Controls the maximum length to use by one of the truncation/padding parameters. If left unset or set to `None`, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated. return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors instead of list of python integers. Acceptable values are: - `\'tf\'`: Return TensorFlow `tf.constant` objects. - `\'pt\'`: Return PyTorch `torch.Tensor` objects. - `\'np\'`: Return Numpy `np.ndarray` objects. return_attention_mask (`bool`, *optional*): Whether or not to return the attention mask. If not set, will return the attention mask according to the specific tokenizer\'s default, defined by the `return_outputs` attribute. [What are attention masks?](../glossary#attention-mask) Return: `Dict[str, List[List[int]]]`: A dictionary with the following keys: - `input_ids`: List of token ids to be fed to a model. - `attention_mask`: List of indices specifying which tokens should be attended to by the model. ''' @add_start_docstrings(A_ ) class A : def __call__(self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = False , lowerCAmelCase = False , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , **lowerCAmelCase , ): if titles is None and texts is None: return super().__call__( lowerCAmelCase , padding=lowerCAmelCase , truncation=lowerCAmelCase , max_length=lowerCAmelCase , return_tensors=lowerCAmelCase , return_attention_mask=lowerCAmelCase , **lowerCAmelCase , ) elif titles is None or texts is None: __lowercase= titles if texts is None else texts return super().__call__( lowerCAmelCase , lowerCAmelCase , padding=lowerCAmelCase , truncation=lowerCAmelCase , max_length=lowerCAmelCase , return_tensors=lowerCAmelCase , return_attention_mask=lowerCAmelCase , **lowerCAmelCase , ) __lowercase= titles if not isinstance(lowerCAmelCase , lowerCAmelCase ) else [titles] __lowercase= texts if not isinstance(lowerCAmelCase , lowerCAmelCase ) else [texts] __lowercase= len(lowerCAmelCase ) __lowercase= questions if not isinstance(lowerCAmelCase , lowerCAmelCase ) else [questions] * n_passages assert len(lowerCAmelCase ) == len( lowerCAmelCase ), f'There should be as many titles than texts but got {len(lowerCAmelCase )} titles and {len(lowerCAmelCase )} texts.' 
__lowercase= super().__call__(lowerCAmelCase , lowerCAmelCase , padding=lowerCAmelCase , truncation=lowerCAmelCase )['input_ids'] __lowercase= super().__call__(lowerCAmelCase , add_special_tokens=lowerCAmelCase , padding=lowerCAmelCase , truncation=lowerCAmelCase )['input_ids'] __lowercase= { 'input_ids': [ (encoded_question_and_title + encoded_text)[:max_length] if max_length is not None and truncation else encoded_question_and_title + encoded_text for encoded_question_and_title, encoded_text in zip(lowerCAmelCase , lowerCAmelCase ) ] } if return_attention_mask is not False: __lowercase= [] for input_ids in encoded_inputs["input_ids"]: attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] ) __lowercase= attention_mask return self.pad(lowerCAmelCase , padding=lowerCAmelCase , max_length=lowerCAmelCase , return_tensors=lowerCAmelCase ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = 1_6 , lowerCAmelCase = 6_4 , lowerCAmelCase = 4 , ): __lowercase= reader_input['input_ids'] __lowercase, __lowercase, __lowercase= reader_output[:3] __lowercase= len(lowerCAmelCase ) __lowercase= sorted(range(lowerCAmelCase ) , reverse=lowerCAmelCase , key=relevance_logits.__getitem__ ) __lowercase= [] for doc_id in sorted_docs: __lowercase= list(input_ids[doc_id] ) # assuming question & title information is at the beginning of the sequence __lowercase= sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id if sequence_ids[-1] == self.pad_token_id: __lowercase= sequence_ids.index(self.pad_token_id ) else: __lowercase= len(lowerCAmelCase ) __lowercase= self._get_best_spans( start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=lowerCAmelCase , top_spans=lowerCAmelCase , ) for start_index, end_index in best_spans: start_index += passage_offset end_index += passage_offset nbest_spans_predictions.append( DPRSpanPrediction( span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=lowerCAmelCase , start_index=lowerCAmelCase , end_index=lowerCAmelCase , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) ) if len(lowerCAmelCase ) >= num_spans: break return nbest_spans_predictions[:num_spans] def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ): __lowercase= [] for start_index, start_score in enumerate(lowerCAmelCase ): for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ): scores.append(((start_index, start_index + answer_length), start_score + end_score) ) __lowercase= sorted(lowerCAmelCase , key=lambda lowerCAmelCase : x[1] , reverse=lowerCAmelCase ) __lowercase= [] for (start_index, end_index), score in scores: assert start_index <= end_index, f'Wrong span indices: [{start_index}:{end_index}]' __lowercase= end_index - start_index + 1 assert length <= max_answer_length, f'Span is too long: {length} > {max_answer_length}' if any( start_index <= prev_start_index <= prev_end_index <= end_index or prev_start_index <= start_index <= end_index <= prev_end_index for (prev_start_index, prev_end_index) in chosen_span_intervals ): continue chosen_span_intervals.append((start_index, end_index) ) if len(lowerCAmelCase ) == top_spans: break return chosen_span_intervals @add_end_docstrings(A_ ) class A ( A_ , A_ ): UpperCamelCase_ : Optional[int] =VOCAB_FILES_NAMES UpperCamelCase_ : List[str] 
=READER_PRETRAINED_VOCAB_FILES_MAP UpperCamelCase_ : Dict =READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase_ : Optional[Any] =READER_PRETRAINED_INIT_CONFIGURATION UpperCamelCase_ : Union[str, Any] =['''input_ids''', '''attention_mask'''] UpperCamelCase_ : Dict =DPRReaderTokenizer
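
# ---------------------------------------------------------------------------
# A minimal sketch of the ranking idea inside `_get_best_spans` above: score
# every candidate answer span by start_logit + end_logit, sort by score, and
# keep the top non-overlapping spans. The logits below are made-up numbers
# purely for illustration.
if __name__ == "__main__":
    start_logits = [0.1, 2.0, 0.3, 0.05]
    end_logits = [0.2, 0.1, 1.5, 0.4]
    max_answer_length, top_spans = 3, 2

    scores = [
        ((s, s + length), start_logits[s] + end_logits[s + length])
        for s in range(len(start_logits))
        for length in range(min(max_answer_length, len(end_logits) - s))
    ]
    scores.sort(key=lambda pair: pair[1], reverse=True)

    chosen = []
    for (start, end), _score in scores:
        # skip spans that overlap an already-chosen (higher-scoring) span
        if any(not (end < s or start > e) for s, e in chosen):
            continue
        chosen.append((start, end))
        if len(chosen) == top_spans:
            break

    assert chosen == [(1, 2), (3, 3)]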
def interpolation_search(sorted_collection, item):
    """Search `item` in an ascending `sorted_collection`, iteratively."""
    left = 0
    right = len(sorted_collection) - 1

    while left <= right:
        # avoid division by 0 during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None

        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )

        # out of range check
        if point < 0 or point >= len(sorted_collection):
            return None

        current_item = sorted_collection[point]
        if current_item == item:
            return point
        else:
            if point < left:
                right = left
                left = point
            elif point > right:
                left = right
                right = point
            else:
                if item < current_item:
                    right = point - 1
                else:
                    left = point + 1
    return None


def interpolation_search_by_recursion(sorted_collection, item, left, right):
    """Pure recursive implementation over the slice [left, right]."""
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None

    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )

    # out of range check
    if point < 0 or point >= len(sorted_collection):
        return None

    if sorted_collection[point] == item:
        return point
    elif point < left:
        return interpolation_search_by_recursion(sorted_collection, item, point, left)
    elif point > right:
        return interpolation_search_by_recursion(sorted_collection, item, right, point)
    else:
        if sorted_collection[point] > item:
            return interpolation_search_by_recursion(sorted_collection, item, left, point - 1)
        else:
            return interpolation_search_by_recursion(sorted_collection, item, point + 1, right)


def __assert_sorted(collection):
    """Raise ValueError if `collection` is not sorted in ascending order."""
    if collection != sorted(collection):
        raise ValueError("Collection must be ascending sorted")
    return True


if __name__ == "__main__":
    import sys

    debug = 0
    collection = [10, 30, 40, 45, 50, 66, 77, 93]
    if debug == 1:
        try:
            __assert_sorted(collection)
        except ValueError:
            sys.exit("Sequence must be ascending sorted to apply interpolation search")

    target = 67
    result = interpolation_search(collection, target)
    if result is not None:
        print(f"{target} found at positions: {result}")
    else:
        print("Not found")
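
# Quick sanity checks of the iterative search on the same sample data as the
# debug block above:
if __name__ == "__main__":
    sample = [10, 30, 40, 45, 50, 66, 77, 93]
    assert interpolation_search(sample, 45) == 3
    assert interpolation_search(sample, 12) is None  # absent values return None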
import inspect import unittest import torch import torch.nn as nn from accelerate.hooks import ( AlignDevicesHook, ModelHook, SequentialHook, add_hook_to_module, attach_align_device_hook, remove_hook_from_module, remove_hook_from_submodules, ) from accelerate.test_utils import require_multi_gpu class A ( nn.Module ): def __init__(self ): super().__init__() __lowercase= nn.Linear(3 , 4 ) __lowercase= nn.BatchNormad(4 ) __lowercase= nn.Linear(4 , 5 ) def _A (self , lowerCAmelCase ): return self.lineara(self.batchnorm(self.lineara(lowerCAmelCase ) ) ) class A ( A_ ): def _A (self , lowerCAmelCase , *lowerCAmelCase , **lowerCAmelCase ): return (args[0] + 1,) + args[1:], kwargs class A ( A_ ): def _A (self , lowerCAmelCase , lowerCAmelCase ): return output + 1 class A ( unittest.TestCase ): def _A (self ): __lowercase= ModelForTest() __lowercase= ModelHook() add_hook_to_module(lowerCAmelCase , lowerCAmelCase ) self.assertEqual(test_model._hf_hook , lowerCAmelCase ) self.assertTrue(hasattr(lowerCAmelCase , '_old_forward' ) ) # Check adding the hook did not change the name or the signature self.assertEqual(test_model.forward.__name__ , 'forward' ) self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ['x'] ) remove_hook_from_module(lowerCAmelCase ) self.assertFalse(hasattr(lowerCAmelCase , '_hf_hook' ) ) self.assertFalse(hasattr(lowerCAmelCase , '_old_forward' ) ) def _A (self ): __lowercase= ModelForTest() __lowercase= ModelHook() add_hook_to_module(lowerCAmelCase , lowerCAmelCase ) add_hook_to_module(lowerCAmelCase , lowerCAmelCase , append=lowerCAmelCase ) self.assertEqual(isinstance(test_model._hf_hook , lowerCAmelCase ) , lowerCAmelCase ) self.assertEqual(len(test_model._hf_hook.hooks ) , 2 ) self.assertTrue(hasattr(lowerCAmelCase , '_old_forward' ) ) # Check adding the hook did not change the name or the signature self.assertEqual(test_model.forward.__name__ , 'forward' ) self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ['x'] ) remove_hook_from_module(lowerCAmelCase ) self.assertFalse(hasattr(lowerCAmelCase , '_hf_hook' ) ) self.assertFalse(hasattr(lowerCAmelCase , '_old_forward' ) ) def _A (self ): __lowercase= ModelForTest() __lowercase= torch.randn(2 , 3 ) __lowercase= test_model(x + 1 ) __lowercase= test_model(x + 2 ) __lowercase= PreForwardHook() add_hook_to_module(lowerCAmelCase , lowerCAmelCase ) __lowercase= test_model(lowerCAmelCase ) self.assertTrue(torch.allclose(lowerCAmelCase , lowerCAmelCase , atol=1E-5 ) ) # Attaching a hook to a model when it already has one replaces, does not chain __lowercase= PreForwardHook() add_hook_to_module(lowerCAmelCase , lowerCAmelCase ) __lowercase= test_model(lowerCAmelCase ) self.assertTrue(torch.allclose(lowerCAmelCase , lowerCAmelCase , atol=1E-5 ) ) # You need to use the sequential hook to chain two or more hooks __lowercase= SequentialHook(PreForwardHook() , PreForwardHook() ) add_hook_to_module(lowerCAmelCase , lowerCAmelCase ) __lowercase= test_model(lowerCAmelCase ) assert torch.allclose(lowerCAmelCase , lowerCAmelCase , atol=1E-5 ) def _A (self ): __lowercase= ModelForTest() __lowercase= torch.randn(2 , 3 ) __lowercase= test_model(lowerCAmelCase ) __lowercase= PostForwardHook() add_hook_to_module(lowerCAmelCase , lowerCAmelCase ) __lowercase= test_model(lowerCAmelCase ) self.assertTrue(torch.allclose(lowerCAmelCase , output + 1 , atol=1E-5 ) ) # Attaching a hook to a model when it already has one replaces, does not chain __lowercase= PostForwardHook() 
add_hook_to_module(lowerCAmelCase , lowerCAmelCase ) __lowercase= test_model(lowerCAmelCase ) self.assertTrue(torch.allclose(lowerCAmelCase , output + 1 , atol=1E-5 ) ) # You need to use the sequential hook to chain two or more hooks __lowercase= SequentialHook(PostForwardHook() , PostForwardHook() ) add_hook_to_module(lowerCAmelCase , lowerCAmelCase ) __lowercase= test_model(lowerCAmelCase ) assert torch.allclose(lowerCAmelCase , output + 2 , atol=1E-5 ) def _A (self ): __lowercase= ModelForTest() __lowercase= torch.randn(2 , 3 ) __lowercase= test_model(lowerCAmelCase ) __lowercase= PostForwardHook() add_hook_to_module(lowerCAmelCase , lowerCAmelCase ) __lowercase= test_model(lowerCAmelCase ) self.assertTrue(torch.allclose(lowerCAmelCase , output + 1 ) ) self.assertTrue(outputa.requires_grad ) __lowercase= True __lowercase= test_model(lowerCAmelCase ) self.assertFalse(outputa.requires_grad ) @require_multi_gpu def _A (self ): __lowercase= ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) # This will move each submodule on different devices add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) ) add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) ) add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) ) self.assertEqual(model.lineara.weight.device , torch.device(0 ) ) self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) ) self.assertEqual(model.lineara.weight.device , torch.device(1 ) ) # We can still make a forward pass. The input does not need to be on any particular device __lowercase= torch.randn(2 , 3 ) __lowercase= model(lowerCAmelCase ) self.assertEqual(output.device , torch.device(1 ) ) # We can add a general hook to put back output on same device as input. 
add_hook_to_module(lowerCAmelCase , AlignDevicesHook(io_same_device=lowerCAmelCase ) ) __lowercase= torch.randn(2 , 3 ).to(0 ) __lowercase= model(lowerCAmelCase ) self.assertEqual(output.device , torch.device(0 ) ) def _A (self ): __lowercase= ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) # This will move each submodule on different devices __lowercase= {'execution_device': 0 if torch.cuda.is_available() else 'cpu', 'offload': True} add_hook_to_module(model.lineara , AlignDevicesHook(**lowerCAmelCase ) ) add_hook_to_module(model.batchnorm , AlignDevicesHook(**lowerCAmelCase ) ) add_hook_to_module(model.lineara , AlignDevicesHook(**lowerCAmelCase ) ) # Parameters have been offloaded, so on the meta device self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) ) self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) # Buffers are not included in the offload by default, so are on the execution device __lowercase= torch.device(hook_kwargs['execution_device'] ) self.assertEqual(model.batchnorm.running_mean.device , lowerCAmelCase ) __lowercase= torch.randn(2 , 3 ) __lowercase= model(lowerCAmelCase ) self.assertEqual(output.device , lowerCAmelCase ) # Removing hooks loads back the weights in the model. remove_hook_from_module(model.lineara ) remove_hook_from_module(model.batchnorm ) remove_hook_from_module(model.lineara ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) # Now test with buffers included in the offload __lowercase= { 'execution_device': 0 if torch.cuda.is_available() else 'cpu', 'offload': True, 'offload_buffers': True, } add_hook_to_module(model.lineara , AlignDevicesHook(**lowerCAmelCase ) ) add_hook_to_module(model.batchnorm , AlignDevicesHook(**lowerCAmelCase ) ) add_hook_to_module(model.lineara , AlignDevicesHook(**lowerCAmelCase ) ) # Parameters have been offloaded, so on the meta device, buffers included self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) ) self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device('meta' ) ) __lowercase= torch.randn(2 , 3 ) __lowercase= model(lowerCAmelCase ) self.assertEqual(output.device , lowerCAmelCase ) # Removing hooks loads back the weights in the model. 
remove_hook_from_module(model.lineara ) remove_hook_from_module(model.batchnorm ) remove_hook_from_module(model.lineara ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) def _A (self ): __lowercase= ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) # This will move each submodule on different devices __lowercase= 0 if torch.cuda.is_available() else 'cpu' attach_align_device_hook(lowerCAmelCase , execution_device=lowerCAmelCase , offload=lowerCAmelCase ) # Parameters have been offloaded, so on the meta device self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) ) self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) # Buffers are not included in the offload by default, so are on the execution device __lowercase= torch.device(lowerCAmelCase ) self.assertEqual(model.batchnorm.running_mean.device , lowerCAmelCase ) __lowercase= torch.randn(2 , 3 ) __lowercase= model(lowerCAmelCase ) self.assertEqual(output.device , lowerCAmelCase ) # Removing hooks loads back the weights in the model. remove_hook_from_submodules(lowerCAmelCase ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) # Now test with buffers included in the offload attach_align_device_hook(lowerCAmelCase , execution_device=lowerCAmelCase , offload=lowerCAmelCase , offload_buffers=lowerCAmelCase ) # Parameters have been offloaded, so on the meta device, buffers included self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) ) self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device('meta' ) ) __lowercase= torch.randn(2 , 3 ) __lowercase= model(lowerCAmelCase ) self.assertEqual(output.device , lowerCAmelCase ) # Removing hooks loads back the weights in the model. 
remove_hook_from_submodules(lowerCAmelCase ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) def _A (self ): __lowercase= ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) # This will move each submodule on different devices __lowercase= 0 if torch.cuda.is_available() else 'cpu' attach_align_device_hook( lowerCAmelCase , execution_device=lowerCAmelCase , offload=lowerCAmelCase , weights_map=model.state_dict() ) # Parameters have been offloaded, so on the meta device self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) ) self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) # Buffers are not included in the offload by default, so are on the execution device __lowercase= torch.device(lowerCAmelCase ) self.assertEqual(model.batchnorm.running_mean.device , lowerCAmelCase ) __lowercase= torch.randn(2 , 3 ) __lowercase= model(lowerCAmelCase ) self.assertEqual(output.device , lowerCAmelCase ) # Removing hooks loads back the weights in the model. remove_hook_from_submodules(lowerCAmelCase ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) # Now test with buffers included in the offload attach_align_device_hook( lowerCAmelCase , execution_device=lowerCAmelCase , offload=lowerCAmelCase , weights_map=model.state_dict() , offload_buffers=lowerCAmelCase , ) # Parameters have been offloaded, so on the meta device, buffers included self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) ) self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device('meta' ) ) __lowercase= torch.randn(2 , 3 ) __lowercase= model(lowerCAmelCase ) self.assertEqual(output.device , lowerCAmelCase ) # Removing hooks loads back the weights in the model. remove_hook_from_submodules(lowerCAmelCase ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
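
# ---------------------------------------------------------------------------
# A self-contained sketch of what `add_hook_to_module` does under the hood:
# stash the original forward, then route every call through the hook's
# pre/post methods. `SimpleHook` and `attach` are illustrative names, not
# accelerate's actual API.
if __name__ == "__main__":
    import functools

    class SimpleHook:
        def pre_forward(self, module, *args, **kwargs):
            return args, kwargs

        def post_forward(self, module, output):
            return output

    def attach(module, hook):
        old_forward = module.forward

        @functools.wraps(old_forward)
        def new_forward(*args, **kwargs):
            args, kwargs = hook.pre_forward(module, *args, **kwargs)
            output = old_forward(*args, **kwargs)
            return hook.post_forward(module, output)

        module.forward = new_forward
        module._old_forward = old_forward  # kept around so the hook can be removed
        return module

    class AddOnePre(SimpleHook):
        def pre_forward(self, module, *args, **kwargs):
            return (args[0] + 1,) + args[1:], kwargs

    linear = attach(nn.Linear(3, 3), AddOnePre())
    x = torch.randn(2, 3)
    assert torch.allclose(linear(x), linear._old_forward(x + 1))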
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_vivit": ["VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "VivitConfig"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_vivit"] = ["VivitImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vivit"] = [
        "VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "VivitModel",
        "VivitPreTrainedModel",
        "VivitForVideoClassification",
    ]


if TYPE_CHECKING:
    from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_vivit import VivitImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vivit import (
            VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            VivitForVideoClassification,
            VivitModel,
            VivitPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
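
# ---------------------------------------------------------------------------
# A rough, self-contained sketch of the deferred-import idea behind
# `_LazyModule`: attribute access triggers the real submodule import on first
# use, then caches the result. `LazyModule` here is illustrative only, not
# the actual implementation.
if __name__ == "__main__":
    import importlib
    import types

    class LazyModule(types.ModuleType):
        def __init__(self, name, import_structure):
            super().__init__(name)
            # attribute name -> submodule that actually defines it
            self._attr_to_module = {
                attr: mod for mod, attrs in import_structure.items() for attr in attrs
            }

        def __getattr__(self, attr):
            if attr not in self._attr_to_module:
                raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
            submodule = importlib.import_module(f"{self.__name__}.{self._attr_to_module[attr]}")
            value = getattr(submodule, attr)
            setattr(self, attr, value)  # cache so __getattr__ is not hit again
            return value

    lazy = LazyModule("json", {"decoder": ["JSONDecoder"]})
    assert lazy.JSONDecoder is importlib.import_module("json.decoder").JSONDecoder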
import os import unittest from huggingface_hub.utils import are_progress_bars_disabled import transformers.models.bart.tokenization_bart from transformers import logging from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context from transformers.utils.logging import disable_progress_bar, enable_progress_bar class A ( unittest.TestCase ): def _A (self ): __lowercase= logging.get_logger() # the current default level is logging.WARNING __lowercase= logging.get_verbosity() logging.set_verbosity_error() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) logging.set_verbosity_warning() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) logging.set_verbosity_info() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) logging.set_verbosity_debug() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) # restore to the original level logging.set_verbosity(lowerCAmelCase ) def _A (self ): __lowercase= logging.get_verbosity() __lowercase= logging.get_logger('transformers.models.bart.tokenization_bart' ) __lowercase= 'Testing 1, 2, 3' # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`) if level_origin <= logging.WARNING: with CaptureLogger(lowerCAmelCase ) as cl: logger.warning(lowerCAmelCase ) self.assertEqual(cl.out , msg + '\n' ) # this is setting the level for all of `transformers.*` loggers logging.set_verbosity_error() # should not be able to log warnings with CaptureLogger(lowerCAmelCase ) as cl: logger.warning(lowerCAmelCase ) self.assertEqual(cl.out , '' ) # should be able to log warnings again logging.set_verbosity_warning() with CaptureLogger(lowerCAmelCase ) as cl: logger.warning(lowerCAmelCase ) self.assertEqual(cl.out , msg + '\n' ) # restore to the original level logging.set_verbosity(lowerCAmelCase ) @mockenv(TRANSFORMERS_VERBOSITY='error' ) def _A (self ): # reset for the env var to take effect, next time some logger call is made transformers.utils.logging._reset_library_root_logger() # this action activates the env var __lowercase= logging.get_logger('transformers.models.bart.tokenization_bart' ) __lowercase= os.getenv('TRANSFORMERS_VERBOSITY' , lowerCAmelCase ) __lowercase= logging.log_levels[env_level_str] __lowercase= logging.get_verbosity() self.assertEqual( lowerCAmelCase , lowerCAmelCase , f'TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}' , ) # restore to the original level __lowercase= '' transformers.utils.logging._reset_library_root_logger() @mockenv(TRANSFORMERS_VERBOSITY='super-error' ) def _A (self ): # reset for the env var to take effect, next time some logger call is made transformers.utils.logging._reset_library_root_logger() __lowercase= logging.logging.getLogger() with CaptureLogger(lowerCAmelCase ) as cl: # this action activates the env var logging.get_logger('transformers.models.bart.tokenization_bart' ) self.assertIn('Unknown option TRANSFORMERS_VERBOSITY=super-error' , cl.out ) # no need to restore as nothing was changed def _A (self ): # testing `logger.warning_advice()` transformers.utils.logging._reset_library_root_logger() __lowercase= logging.get_logger('transformers.models.bart.tokenization_bart' ) __lowercase= 'Testing 1, 2, 3' with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='1' ): # nothing should be logged as env var disables this method with CaptureLogger(lowerCAmelCase ) as cl: logger.warning_advice(lowerCAmelCase ) self.assertEqual(cl.out , '' ) with 
mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='' ): # should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset with CaptureLogger(lowerCAmelCase ) as cl: logger.warning_advice(lowerCAmelCase ) self.assertEqual(cl.out , msg + '\n' ) def _lowerCamelCase( ) -> Optional[int]: '''simple docstring''' disable_progress_bar() assert are_progress_bars_disabled() enable_progress_bar() assert not are_progress_bars_disabled()
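
# ---------------------------------------------------------------------------
# For reference, the public verbosity API these tests exercise looks roughly
# like this (a sketch; it changes process-wide logging state only):
if __name__ == "__main__":
    from transformers.utils import logging as hf_logging

    hf_logging.set_verbosity_info()  # show info-level messages library-wide
    logger = hf_logging.get_logger("transformers.models.bart.tokenization_bart")
    logger.info("visible at info level")
    hf_logging.set_verbosity_error()  # back to errors only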
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

XLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "xlm-mlm-en-2048": "https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json",
    "xlm-mlm-ende-1024": "https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json",
    "xlm-mlm-enfr-1024": "https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json",
    "xlm-mlm-enro-1024": "https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json",
    "xlm-mlm-tlm-xnli15-1024": "https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json",
    "xlm-mlm-xnli15-1024": "https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json",
    "xlm-clm-enfr-1024": "https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json",
    "xlm-clm-ende-1024": "https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json",
    "xlm-mlm-17-1280": "https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json",
    "xlm-mlm-100-1280": "https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json",
}


class XLMConfig(PretrainedConfig):
    model_type = "xlm"
    attribute_map = {
        "hidden_size": "emb_dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
        "n_words": "vocab_size",  # For backward compatibility
    }

    def __init__(
        self,
        vocab_size=30145,
        emb_dim=2048,
        n_layers=12,
        n_heads=16,
        dropout=0.1,
        attention_dropout=0.1,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=1,
        use_lang_emb=True,
        max_position_embeddings=512,
        embed_init_std=2048**-0.5,
        layer_norm_eps=1e-12,
        init_std=0.02,
        bos_index=0,
        eos_index=1,
        pad_index=2,
        unk_index=3,
        mask_index=5,
        is_encoder=True,
        summary_type="first",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        mask_token_id=0,
        lang_id=0,
        pad_token_id=2,
        bos_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.emb_dim = emb_dim
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.use_lang_emb = use_lang_emb
        self.layer_norm_eps = layer_norm_eps
        self.bos_index = bos_index
        self.eos_index = eos_index
        self.pad_index = pad_index
        self.unk_index = unk_index
        self.mask_index = mask_index
        self.is_encoder = is_encoder
        self.max_position_embeddings = max_position_embeddings
        self.embed_init_std = embed_init_std
        self.init_std = init_std
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_proj_to_labels = summary_proj_to_labels
        self.summary_first_dropout = summary_first_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.mask_token_id = mask_token_id
        self.lang_id = lang_id

        if "n_words" in kwargs:
            self.n_words = kwargs["n_words"]

        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, **kwargs)


class XLMOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
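
# ---------------------------------------------------------------------------
# A self-contained sketch of the aliasing that `attribute_map` above enables:
# canonical names such as `hidden_size` are redirected to the legacy XLM
# names such as `emb_dim`. `AliasedConfig` is illustrative, not the actual
# PretrainedConfig implementation.
if __name__ == "__main__":

    class AliasedConfig:
        attribute_map = {"hidden_size": "emb_dim", "num_attention_heads": "n_heads"}

        def __init__(self, emb_dim=2048, n_heads=16):
            self.emb_dim = emb_dim
            self.n_heads = n_heads

        def __getattr__(self, name):
            if name in self.attribute_map:
                return getattr(self, self.attribute_map[name])
            raise AttributeError(name)

    config = AliasedConfig(emb_dim=1024)
    assert config.hidden_size == 1024  # resolved through the alias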
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging lowerCAmelCase = '''▁''' lowerCAmelCase = {'''vocab_file''': '''spiece.model'''} lowerCAmelCase = { '''vocab_file''': {'''google/pegasus-xsum''': '''https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'''} } lowerCAmelCase = { '''google/pegasus-xsum''': 5_1_2, } lowerCAmelCase = logging.get_logger(__name__) class A ( A_ ): UpperCamelCase_ : Union[str, Any] =VOCAB_FILES_NAMES UpperCamelCase_ : List[Any] =VOCAB_FILES_NAMES UpperCamelCase_ : int =PRETRAINED_VOCAB_FILES_MAP UpperCamelCase_ : Tuple =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase_ : int =['''input_ids''', '''attention_mask'''] def __init__(self , lowerCAmelCase , lowerCAmelCase="<pad>" , lowerCAmelCase="</s>" , lowerCAmelCase="<unk>" , lowerCAmelCase="<mask_2>" , lowerCAmelCase="<mask_1>" , lowerCAmelCase=None , lowerCAmelCase=1_0_3 , lowerCAmelCase = None , **lowerCAmelCase , ): __lowercase= offset if additional_special_tokens is not None: if not isinstance(lowerCAmelCase , lowerCAmelCase ): raise TypeError( f'additional_special_tokens should be of type {type(lowerCAmelCase )}, but is' f' {type(lowerCAmelCase )}' ) __lowercase= ( ([mask_token_sent] + additional_special_tokens) if mask_token_sent not in additional_special_tokens and mask_token_sent is not None else additional_special_tokens ) # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken additional_special_tokens_extended += [ f'<unk_{i}>' for i in range(len(lowerCAmelCase ) , self.offset - 1 ) ] if len(set(lowerCAmelCase ) ) != len(lowerCAmelCase ): raise ValueError( 'Please make sure that the provided additional_special_tokens do not contain an incorrectly' f' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.' 
) __lowercase= additional_special_tokens_extended else: __lowercase= [mask_token_sent] if mask_token_sent is not None else [] additional_special_tokens += [f'<unk_{i}>' for i in range(2 , self.offset )] __lowercase= {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( eos_token=lowerCAmelCase , unk_token=lowerCAmelCase , mask_token=lowerCAmelCase , pad_token=lowerCAmelCase , mask_token_sent=lowerCAmelCase , offset=lowerCAmelCase , additional_special_tokens=lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **lowerCAmelCase , ) __lowercase= mask_token_sent __lowercase= vocab_file __lowercase= spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(lowerCAmelCase ) # add special tokens to encoder dict __lowercase= { 0: self.pad_token, 1: self.eos_token, } if self.mask_token_sent is not None: self.encoder.update( { 2: self.mask_token_sent, 3: self.mask_token, } ) if self.offset > 0: # entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102 # mask_token_sent is already added to list -> so start at 1 self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1 , self.offset - 1 )} ) __lowercase= {v: k for k, v in self.encoder.items()} @property def _A (self ): return len(self.sp_model ) + self.offset def _A (self ): __lowercase= {self.convert_ids_to_tokens(lowerCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__(self ): __lowercase= self.__dict__.copy() __lowercase= None return state def __setstate__(self , lowerCAmelCase ): __lowercase= d # for backward compatibility if not hasattr(self , 'sp_model_kwargs' ): __lowercase= {} __lowercase= spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def _A (self , lowerCAmelCase ): return self.sp_model.encode(lowerCAmelCase , out_type=lowerCAmelCase ) def _A (self , lowerCAmelCase ): if token in self.decoder: return self.decoder[token] elif token in self.added_tokens_decoder: return self.added_tokens_decoder[token] __lowercase= self.sp_model.piece_to_id(lowerCAmelCase ) return sp_id + self.offset def _A (self , lowerCAmelCase ): if index in self.encoder: return self.encoder[index] elif index in self.added_tokens_encoder: return self.added_tokens_encoder[index] else: __lowercase= self.sp_model.IdToPiece(index - self.offset ) return token def _A (self , lowerCAmelCase ): __lowercase= [] __lowercase= '' for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(lowerCAmelCase ) + token __lowercase= [] else: current_sub_tokens.append(lowerCAmelCase ) out_string += self.sp_model.decode(lowerCAmelCase ) return out_string.strip() def _A (self , lowerCAmelCase=False ): return 1 def _A (self , lowerCAmelCase ): __lowercase= set(self.all_special_ids ) # call it once instead of inside list comp all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special return [1 if x in all_special_ids else 0 for x in seq] def _A (self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = False ): if already_has_special_tokens: return self._special_token_mask(lowerCAmelCase ) elif token_ids_a is None: return self._special_token_mask(lowerCAmelCase ) + [1] else: return self._special_token_mask(token_ids_a + token_ids_a ) + [1] def _A (self , lowerCAmelCase , lowerCAmelCase=None ): if token_ids_a is None: return token_ids_a + [self.eos_token_id] # We don't expect to 
process pairs, but leave the pair logic for API consistency return token_ids_a + token_ids_a + [self.eos_token_id] def _A (self , lowerCAmelCase , lowerCAmelCase = None ): if not os.path.isdir(lowerCAmelCase ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return __lowercase= os.path.join( lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , lowerCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(lowerCAmelCase , 'wb' ) as fi: __lowercase= self.sp_model.serialized_model_proto() fi.write(lowerCAmelCase ) return (out_vocab_file,)
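
# ---------------------------------------------------------------------------
# A sketch of the id layout that the `offset` logic above maintains: ids
# 0..offset-1 are reserved for the pad/eos/mask/<unk_…> specials, and every
# raw SentencePiece id is shifted up by `offset`. The helper names are
# illustrative, not part of the tokenizer's API.
if __name__ == "__main__":
    offset = 103
    reserved = {0: "<pad>", 1: "</s>", 2: "<mask_2>", 3: "<mask_1>"}  # plus <unk_…> fillers up to id 102

    def sp_id_to_token_id(sp_id):
        return sp_id + offset  # SentencePiece ids start after the reserved block

    def token_id_to_sp_id(token_id):
        assert token_id >= offset, "ids below the offset are reserved specials"
        return token_id - offset

    assert token_id_to_sp_id(sp_id_to_token_id(7)) == 7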
from __future__ import annotations

from functools import lru_cache
from math import ceil

NUM_PRIMES = 100

# sieve of Eratosthenes over the odd numbers below NUM_PRIMES
primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)
for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
    if prime not in primes:
        continue
    primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))


@lru_cache(maxsize=100)
def partition(number_to_partition: int) -> set[int]:
    """Return the products of the partitions of `number_to_partition` into primes."""
    if number_to_partition < 0:
        return set()
    elif number_to_partition == 0:
        return {1}

    ret: set[int] = set()
    for prime in primes:
        if prime > number_to_partition:
            continue
        for sub in partition(number_to_partition - prime):
            ret.add(sub * prime)

    return ret


def solution(number_unique_partitions: int = 5000) -> int | None:
    """Return the first number with more than `number_unique_partitions` prime partitions."""
    for number_to_partition in range(1, NUM_PRIMES):
        if len(partition(number_to_partition)) > number_unique_partitions:
            return number_to_partition
    return None


if __name__ == "__main__":
    print(f"{solution() = }")
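
# A quick worked check of the memoized recursion above: 5 has exactly two
# partitions into primes (2 + 3 and 5 itself), so `partition(5)` holds the
# two corresponding products, 2 * 3 = 6 and 5.
if __name__ == "__main__":
    assert partition(5) == {5, 6}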
import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST, OpenAIGPTConfig, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification, OpenAIGPTLMHeadModel, OpenAIGPTModel, ) class A : def __init__(self , lowerCAmelCase , lowerCAmelCase=1_3 , lowerCAmelCase=7 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=9_9 , lowerCAmelCase=3_2 , lowerCAmelCase=5 , lowerCAmelCase=4 , lowerCAmelCase=3_7 , lowerCAmelCase="gelu" , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=5_1_2 , lowerCAmelCase=1_6 , lowerCAmelCase=2 , lowerCAmelCase=0.02 , lowerCAmelCase=3 , lowerCAmelCase=4 , lowerCAmelCase=None , ): __lowercase= parent __lowercase= batch_size __lowercase= seq_length __lowercase= is_training __lowercase= use_token_type_ids __lowercase= use_labels __lowercase= vocab_size __lowercase= hidden_size __lowercase= num_hidden_layers __lowercase= num_attention_heads __lowercase= intermediate_size __lowercase= hidden_act __lowercase= hidden_dropout_prob __lowercase= attention_probs_dropout_prob __lowercase= max_position_embeddings __lowercase= type_vocab_size __lowercase= type_sequence_label_size __lowercase= initializer_range __lowercase= num_labels __lowercase= num_choices __lowercase= scope __lowercase= self.vocab_size - 1 def _A (self ): __lowercase= ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowercase= None if self.use_token_type_ids: __lowercase= ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __lowercase= None __lowercase= None __lowercase= None if self.use_labels: __lowercase= ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowercase= ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __lowercase= ids_tensor([self.batch_size] , self.num_choices ) __lowercase= OpenAIGPTConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , ) __lowercase= ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 ) return ( config, input_ids, head_mask, token_type_ids, sequence_labels, token_labels, choice_labels, ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , *lowerCAmelCase ): __lowercase= OpenAIGPTModel(config=lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= model(lowerCAmelCase , token_type_ids=lowerCAmelCase , head_mask=lowerCAmelCase ) __lowercase= model(lowerCAmelCase , token_type_ids=lowerCAmelCase ) __lowercase= model(lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , *lowerCAmelCase ): __lowercase= OpenAIGPTLMHeadModel(lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= model(lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) 
) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , *lowerCAmelCase ): __lowercase= OpenAIGPTDoubleHeadsModel(lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= model(lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , *lowerCAmelCase ): __lowercase= self.num_labels __lowercase= OpenAIGPTForSequenceClassification(lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowercase= model(lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _A (self ): __lowercase= self.prepare_config_and_inputs() ( ( __lowercase ), ( __lowercase ), ( __lowercase ), ( __lowercase ), ( __lowercase ), ( __lowercase ), ( __lowercase ), )= config_and_inputs __lowercase= { 'input_ids': input_ids, 'token_type_ids': token_type_ids, 'head_mask': head_mask, } return config, inputs_dict @require_torch class A ( A_ , A_ , A_ , unittest.TestCase ): UpperCamelCase_ : Optional[Any] =( (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification) if is_torch_available() else () ) UpperCamelCase_ : Tuple =( (OpenAIGPTLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly UpperCamelCase_ : List[str] =( { '''feature-extraction''': OpenAIGPTModel, '''text-classification''': OpenAIGPTForSequenceClassification, '''text-generation''': OpenAIGPTLMHeadModel, '''zero-shot''': OpenAIGPTForSequenceClassification, } if is_torch_available() else {} ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests": # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers. # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a # tiny config could not be created. 
return True return False def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=False ): __lowercase= super()._prepare_for_class(lowerCAmelCase , lowerCAmelCase , return_labels=lowerCAmelCase ) if return_labels: if model_class.__name__ == "OpenAIGPTDoubleHeadsModel": __lowercase= torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=lowerCAmelCase , ) __lowercase= inputs_dict['labels'] __lowercase= inputs_dict['labels'] __lowercase= torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=lowerCAmelCase , ) __lowercase= torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase ) return inputs_dict def _A (self ): __lowercase= OpenAIGPTModelTester(self ) __lowercase= ConfigTester(self , config_class=lowerCAmelCase , n_embd=3_7 ) def _A (self ): self.config_tester.run_common_tests() def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_model(*lowerCAmelCase ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*lowerCAmelCase ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_double_lm_head_model(*lowerCAmelCase ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*lowerCAmelCase ) @slow def _A (self ): for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowercase= OpenAIGPTModel.from_pretrained(lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) @require_torch class A ( unittest.TestCase ): @slow def _A (self ): __lowercase= OpenAIGPTLMHeadModel.from_pretrained('openai-gpt' ) model.to(lowerCAmelCase ) __lowercase= torch.tensor([[4_8_1, 4_7_3_5, 5_4_4]] , dtype=torch.long , device=lowerCAmelCase ) # the president is __lowercase= [ 4_8_1, 4_7_3_5, 5_4_4, 2_4_6, 9_6_3, 8_7_0, 7_6_2, 2_3_9, 2_4_4, 4_0_4_7_7, 2_4_4, 2_4_9, 7_1_9, 8_8_1, 4_8_7, 5_4_4, 2_4_0, 2_4_4, 6_0_3, 4_8_1, ] # the president is a very good man. " \n " i\'m sure he is, " said the __lowercase= model.generate(lowerCAmelCase , do_sample=lowerCAmelCase ) self.assertListEqual(output_ids[0].tolist() , lowerCAmelCase )
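
# ---------------------------------------------------------------------------
# A hedged, minimal stand-alone version of the greedy-generation integration
# check above (it downloads the `openai-gpt` checkpoint on first use):
if __name__ == "__main__":
    model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
    input_ids = torch.tensor([[481, 4735, 544]], dtype=torch.long)  # "the president is"
    output_ids = model.generate(input_ids, do_sample=False)  # greedy decoding
    print(output_ids[0].tolist())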
import torch

from diffusers import DiffusionPipeline


class UnetSchedulerOneForwardPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    def __call__(self):
        # start from pure noise shaped like one model sample
        image = torch.randn(
            (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
        )
        timestep = 1

        # one UNet forward pass followed by one scheduler step
        model_output = self.unet(image, timestep).sample
        scheduler_output = self.scheduler.step(model_output, timestep, image).prev_sample

        # deterministic output: the difference cancels, leaving a tensor of ones
        result = scheduler_output - scheduler_output + torch.ones_like(scheduler_output)
        return result
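
# ---------------------------------------------------------------------------
# A hedged usage sketch for the pipeline above, assuming freshly initialized
# diffusers building blocks (the configuration values are illustrative):
if __name__ == "__main__":
    from diffusers import DDPMScheduler, UNet2DModel

    unet = UNet2DModel(sample_size=32, in_channels=3, out_channels=3)
    scheduler = DDPMScheduler()
    pipeline = UnetSchedulerOneForwardPipeline(unet=unet, scheduler=scheduler)
    result = pipeline()  # one UNet forward + one scheduler step; all ones by construction
    assert bool((result == 1.0).all())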
from math import isqrt


def is_prime(number: int) -> bool:
    """Trial-division primality test up to isqrt(number)."""
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))


def solution(max_prime: int = 10**6) -> int:
    """Count primes below `max_prime` that are a difference of consecutive cubes."""
    primes_count = 0
    cube_index = 1
    prime_candidate = 7
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)

        cube_index += 1
        prime_candidate += 6 * cube_index

    return primes_count


if __name__ == "__main__":
    print(f"{solution() = }")
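
# The candidate sequence above walks exactly the gaps between consecutive
# cubes: (k + 1) ** 3 - k ** 3 == 3 * k * k + 3 * k + 1, and each gap grows
# by 6 * (k + 1), which is why the loop adds `6 * cube_index` per step.
if __name__ == "__main__":
    for k in range(1, 10):
        assert (k + 1) ** 3 - k**3 == 3 * k * k + 3 * k + 1 == 7 + sum(6 * j for j in range(2, k + 1))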
import inspect import unittest from typing import List import numpy as np from transformers import EfficientFormerConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFEfficientFormerForImageClassification, TFEfficientFormerForImageClassificationWithTeacher, TFEfficientFormerModel, ) from transformers.models.efficientformer.modeling_tf_efficientformer import ( TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ) if is_vision_available(): from PIL import Image from transformers import EfficientFormerImageProcessor class A : def __init__(self , lowerCAmelCase , lowerCAmelCase = 1_3 , lowerCAmelCase = 6_4 , lowerCAmelCase = 2 , lowerCAmelCase = 3 , lowerCAmelCase = 3 , lowerCAmelCase = True , lowerCAmelCase = True , lowerCAmelCase = 1_2_8 , lowerCAmelCase=[1_6, 3_2, 6_4, 1_2_8] , lowerCAmelCase = 7 , lowerCAmelCase = 4 , lowerCAmelCase = 3_7 , lowerCAmelCase = "gelu" , lowerCAmelCase = 0.1 , lowerCAmelCase = 0.1 , lowerCAmelCase = 1_0 , lowerCAmelCase = 0.02 , lowerCAmelCase = 2 , lowerCAmelCase = 1 , lowerCAmelCase = 1_2_8 , lowerCAmelCase = [2, 2, 2, 2] , lowerCAmelCase = 2 , lowerCAmelCase = 2 , ): __lowercase= parent __lowercase= batch_size __lowercase= image_size __lowercase= patch_size __lowercase= num_channels __lowercase= is_training __lowercase= use_labels __lowercase= hidden_size __lowercase= num_hidden_layers __lowercase= num_attention_heads __lowercase= intermediate_size __lowercase= hidden_act __lowercase= hidden_dropout_prob __lowercase= attention_probs_dropout_prob __lowercase= type_sequence_label_size __lowercase= initializer_range __lowercase= encoder_stride __lowercase= num_attention_outputs __lowercase= embed_dim __lowercase= embed_dim + 1 __lowercase= resolution __lowercase= depths __lowercase= hidden_sizes __lowercase= dim __lowercase= mlp_expansion_ratio def _A (self ): __lowercase= floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __lowercase= None if self.use_labels: __lowercase= ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowercase= self.get_config() return config, pixel_values, labels def _A (self ): return EfficientFormerConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCAmelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): __lowercase= TFEfficientFormerModel(config=lowerCAmelCase ) __lowercase= model(lowerCAmelCase , training=lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): 
__lowercase= self.type_sequence_label_size __lowercase= TFEfficientFormerForImageClassification(lowerCAmelCase ) __lowercase= model(lowerCAmelCase , labels=lowerCAmelCase , training=lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images __lowercase= 1 __lowercase= TFEfficientFormerForImageClassification(lowerCAmelCase ) __lowercase= floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) __lowercase= model(lowerCAmelCase , labels=lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def _A (self ): __lowercase= self.prepare_config_and_inputs() __lowercase, __lowercase, __lowercase= config_and_inputs __lowercase= {'pixel_values': pixel_values} return config, inputs_dict @require_tf class A ( A_ , A_ , unittest.TestCase ): UpperCamelCase_ : Union[str, Any] =( ( TFEfficientFormerModel, TFEfficientFormerForImageClassificationWithTeacher, TFEfficientFormerForImageClassification, ) if is_tf_available() else () ) UpperCamelCase_ : Optional[int] =( { '''feature-extraction''': TFEfficientFormerModel, '''image-classification''': ( TFEfficientFormerForImageClassification, TFEfficientFormerForImageClassificationWithTeacher, ), } if is_tf_available() else {} ) UpperCamelCase_ : Optional[Any] =False UpperCamelCase_ : Dict =False UpperCamelCase_ : Tuple =False UpperCamelCase_ : Union[str, Any] =False UpperCamelCase_ : Tuple =False def _A (self ): __lowercase= TFEfficientFormerModelTester(self ) __lowercase= ConfigTester( self , config_class=lowerCAmelCase , has_text_modality=lowerCAmelCase , hidden_size=3_7 ) def _A (self ): self.config_tester.run_common_tests() @unittest.skip(reason='EfficientFormer does not use inputs_embeds' ) def _A (self ): pass @unittest.skip(reason='EfficientFormer does not support input and output embeddings' ) def _A (self ): pass def _A (self ): __lowercase, __lowercase= self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowercase= model_class(lowerCAmelCase ) __lowercase= inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __lowercase= [*signature.parameters.keys()] __lowercase= ['pixel_values'] self.assertListEqual(arg_names[:1] , lowerCAmelCase ) def _A (self ): def check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): __lowercase= model_class(lowerCAmelCase ) __lowercase= model(**self._prepare_for_class(lowerCAmelCase , lowerCAmelCase ) , training=lowerCAmelCase ) __lowercase= outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states __lowercase= getattr( self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(lowerCAmelCase ) , lowerCAmelCase ) if hasattr(self.model_tester , 'encoder_seq_length' ): __lowercase= self.model_tester.encoder_seq_length if hasattr(self.model_tester , 'chunk_length' ) and self.model_tester.chunk_length > 1: __lowercase= seq_length * self.model_tester.chunk_length else: __lowercase= self.model_tester.seq_length self.assertListEqual( list(hidden_states[-1].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , ) if config.is_encoder_decoder: __lowercase= outputs.decoder_hidden_states self.asseretIsInstance(lowerCAmelCase , (list, tuple) ) self.assertEqual(len(lowerCAmelCase ) , lowerCAmelCase ) __lowercase= getattr(self.model_tester , 'seq_length' , lowerCAmelCase ) 
__lowercase= getattr(self.model_tester , 'decoder_seq_length' , lowerCAmelCase ) self.assertListEqual( list(hidden_states[-1].shape[-2:] ) , [decoder_seq_length, self.model_tester.hidden_size] , ) __lowercase, __lowercase= self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowercase= True check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __lowercase= True check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=False ): __lowercase= super()._prepare_for_class(lowerCAmelCase , lowerCAmelCase , return_labels=lowerCAmelCase ) if return_labels: if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCAmelCase ) @unittest.skip(reason='EfficientFormer does not implement masked image modeling yet' ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*lowerCAmelCase ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase ) @slow def _A (self ): for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowercase= TFEfficientFormerModel.from_pretrained(lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) def _A (self ): __lowercase, __lowercase= self.model_tester.prepare_config_and_inputs_for_common() __lowercase= True __lowercase= getattr(self.model_tester , 'seq_length' , lowerCAmelCase ) __lowercase= getattr(self.model_tester , 'encoder_seq_length' , lowerCAmelCase ) __lowercase= getattr(self.model_tester , 'key_length' , lowerCAmelCase ) __lowercase= getattr(self.model_tester , 'chunk_length' , lowerCAmelCase ) if chunk_length is not None and hasattr(self.model_tester , 'num_hashes' ): __lowercase= encoder_seq_length * self.model_tester.num_hashes for model_class in self.all_model_classes: __lowercase= True __lowercase= False __lowercase= True __lowercase= model_class(lowerCAmelCase ) __lowercase= model(**self._prepare_for_class(lowerCAmelCase , lowerCAmelCase ) , training=lowerCAmelCase ) __lowercase= outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(lowerCAmelCase ) , self.model_tester.num_attention_outputs ) # check that output_attentions also work using config del inputs_dict["output_attentions"] __lowercase= True __lowercase= model_class(lowerCAmelCase ) __lowercase= model(**self._prepare_for_class(lowerCAmelCase , lowerCAmelCase ) , training=lowerCAmelCase ) __lowercase= outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(lowerCAmelCase ) , self.model_tester.num_attention_outputs ) if chunk_length is not None: self.assertListEqual( list(attentions[0].shape[-4:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , ) else: self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , ) def _A (self ): # We use a simplified version of this test for EfficientFormer because it requires training=False # and Keras refuses to let us force 
that during functional construction __lowercase, __lowercase= self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # Prepare our model __lowercase= model_class(lowerCAmelCase ) # These are maximally general inputs for the model, with multiple None dimensions # Hopefully this will catch any conditionals that fail for flexible shapes __lowercase= { key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=lowerCAmelCase ) for key, val in model.input_signature.items() if key in model.dummy_inputs } __lowercase= model(lowerCAmelCase ) self.assertTrue(outputs_dict is not None ) def _lowerCamelCase( ) -> Optional[int]: '''simple docstring''' __lowercase= Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_tf @require_vision class A ( unittest.TestCase ): @cached_property def _A (self ): return ( EfficientFormerImageProcessor.from_pretrained('snap-research/efficientformer-l1-300' ) if is_vision_available() else None ) @slow def _A (self ): __lowercase= TFEfficientFormerForImageClassification.from_pretrained('snap-research/efficientformer-l1-300' ) __lowercase= self.default_image_processor __lowercase= prepare_img() __lowercase= image_processor(images=lowerCAmelCase , return_tensors='tf' ) # forward pass __lowercase= model(**lowerCAmelCase , training=lowerCAmelCase ) # verify the logits __lowercase= tf.TensorShape((1, 1_0_0_0) ) self.assertEqual(outputs.logits.shape , lowerCAmelCase ) __lowercase= tf.constant([-0.05_55, 0.48_25, -0.08_52] ) self.assertTrue(np.allclose(outputs.logits[0, :3] , lowerCAmelCase , atol=1E-4 ) ) @slow def _A (self ): __lowercase= TFEfficientFormerForImageClassificationWithTeacher.from_pretrained( 'snap-research/efficientformer-l1-300' ) __lowercase= self.default_image_processor __lowercase= prepare_img() __lowercase= image_processor(images=lowerCAmelCase , return_tensors='tf' ) # forward pass __lowercase= model(**lowerCAmelCase , training=lowerCAmelCase ) # verify the logits __lowercase= tf.TensorShape((1, 1_0_0_0) ) self.assertEqual(outputs.logits.shape , lowerCAmelCase ) __lowercase= tf.constant([-0.13_12, 0.43_53, -1.04_99] ) self.assertTrue(np.allclose(outputs.logits[0, :3] , lowerCAmelCase , atol=1E-4 ) )
295
from __future__ import annotations


def prime_factors(n: int) -> list[int]:
    """Return the prime factorisation of n in ascending order.

    >>> prime_factors(360)
    [2, 2, 2, 3, 3, 5]
    """
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        # whatever remains after dividing out all factors <= sqrt(n) is prime
        factors.append(n)
    return factors


if __name__ == "__main__":
    import doctest

    doctest.testmod()
295
1
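A short usage sketch for the prime-factorisation helper above (assuming the restored prime_factors name); both factorisations are easy to verify by hand:

assert prime_factors(360) == [2, 2, 2, 3, 3, 5]
assert prime_factors(97) == [97]  # a prime number is its own only factor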
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Features, Sequence, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class QuestionAnsweringExtractive(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="question-answering-extractive", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"question": Value("string"), "context": Value("string")})
    label_schema: ClassVar[Features] = Features(
        {
            "answers": Sequence(
                {
                    "text": Value("string"),
                    "answer_start": Value("int32"),
                }
            )
        }
    )
    question_column: str = "question"
    context_column: str = "context"
    answers_column: str = "answers"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
295
import os import re import warnings from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_ta import TaTokenizer else: lowerCAmelCase = None lowerCAmelCase = logging.get_logger(__name__) lowerCAmelCase = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''} lowerCAmelCase = { '''vocab_file''': { '''t5-small''': '''https://huggingface.co/t5-small/resolve/main/spiece.model''', '''t5-base''': '''https://huggingface.co/t5-base/resolve/main/spiece.model''', '''t5-large''': '''https://huggingface.co/t5-large/resolve/main/spiece.model''', '''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/spiece.model''', '''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/spiece.model''', }, '''tokenizer_file''': { '''t5-small''': '''https://huggingface.co/t5-small/resolve/main/tokenizer.json''', '''t5-base''': '''https://huggingface.co/t5-base/resolve/main/tokenizer.json''', '''t5-large''': '''https://huggingface.co/t5-large/resolve/main/tokenizer.json''', '''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/tokenizer.json''', '''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/tokenizer.json''', }, } # TODO(PVP) - this should be removed in Transformers v5 lowerCAmelCase = { '''t5-small''': 5_1_2, '''t5-base''': 5_1_2, '''t5-large''': 5_1_2, '''t5-3b''': 5_1_2, '''t5-11b''': 5_1_2, } class A ( A_ ): UpperCamelCase_ : Dict =VOCAB_FILES_NAMES UpperCamelCase_ : Dict =PRETRAINED_VOCAB_FILES_MAP UpperCamelCase_ : List[Any] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase_ : str =['''input_ids''', '''attention_mask'''] UpperCamelCase_ : List[str] =TaTokenizer UpperCamelCase_ : List[int] =[] def __init__(self , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase="</s>" , lowerCAmelCase="<unk>" , lowerCAmelCase="<pad>" , lowerCAmelCase=1_0_0 , lowerCAmelCase=None , **lowerCAmelCase , ): # Add extra_ids to the special token list if extra_ids > 0 and additional_special_tokens is None: __lowercase= [f'<extra_id_{i}>' for i in range(lowerCAmelCase )] elif extra_ids > 0 and additional_special_tokens is not None: # Check that we have the right number of extra special tokens __lowercase= len(set(filter(lambda lowerCAmelCase : bool('extra_id_' in str(lowerCAmelCase ) ) , lowerCAmelCase ) ) ) if extra_tokens != extra_ids: raise ValueError( f'Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are' ' provided to T5Tokenizer. 
In this case the additional_special_tokens must include the extra_ids' ' tokens' ) super().__init__( lowerCAmelCase , tokenizer_file=lowerCAmelCase , eos_token=lowerCAmelCase , unk_token=lowerCAmelCase , pad_token=lowerCAmelCase , extra_ids=lowerCAmelCase , additional_special_tokens=lowerCAmelCase , **lowerCAmelCase , ) __lowercase= vocab_file __lowercase= False if not self.vocab_file else True __lowercase= extra_ids @staticmethod def _A (lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes: __lowercase= TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path] if init_max_model_length is not None and init_max_model_length != max_model_length: return init_max_model_length elif init_max_model_length is None: warnings.warn( 'This tokenizer was incorrectly instantiated with a model max length of' f' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this' ' behavior is kept to avoid breaking backwards compatibility when padding/encoding with' ' `truncation is True`.\n- Be aware that you SHOULD NOT rely on' f' {pretrained_model_name_or_path} automatically truncating your input to' f' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences' f' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with' ' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please' ' instantiate this tokenizer with `model_max_length` set to your preferred value.' , lowerCAmelCase , ) return max_model_length def _A (self , lowerCAmelCase , lowerCAmelCase = None ): if not self.can_save_slow_tokenizer: raise ValueError( 'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ' 'tokenizer.' ) if not os.path.isdir(lowerCAmelCase ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return __lowercase= os.path.join( lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase ): copyfile(self.vocab_file , lowerCAmelCase ) logger.info(f'Copy vocab file to {out_vocab_file}' ) return (out_vocab_file,) def _A (self , lowerCAmelCase , lowerCAmelCase = None ): __lowercase= token_ids_a + [self.eos_token_id] if token_ids_a is None: return self.prefix_tokens + token_ids_a else: __lowercase= token_ids_a + [self.eos_token_id] return self.prefix_tokens + token_ids_a + token_ids_a def _A (self , lowerCAmelCase , lowerCAmelCase = None ): __lowercase= [self.eos_token_id] if token_ids_a is None: return len(token_ids_a + eos ) * [0] return len(token_ids_a + eos + token_ids_a + eos ) * [0] def _A (self ): return list( set(filter(lambda lowerCAmelCase : bool(re.search(r'<extra_id_\d+>' , lowerCAmelCase ) ) is not None , self.additional_special_tokens ) ) ) def _A (self ): return [self.convert_tokens_to_ids(lowerCAmelCase ) for token in self.get_sentinel_tokens()]
295
1
import argparse

import torch
from omegaconf import OmegaConf

from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel


def convert_ldm_original(checkpoint_path, config_path, output_path):
    config = OmegaConf.load(config_path)
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    keys = list(state_dict.keys())

    # extract state_dict for VQVAE
    first_stage_dict = {}
    first_stage_key = "first_stage_model."
    for key in keys:
        if key.startswith(first_stage_key):
            first_stage_dict[key.replace(first_stage_key, "")] = state_dict[key]

    # extract state_dict for UNetLDM
    unet_state_dict = {}
    unet_key = "model.diffusion_model."
    for key in keys:
        if key.startswith(unet_key):
            unet_state_dict[key.replace(unet_key, "")] = state_dict[key]

    vqvae_init_args = config.model.params.first_stage_config.params
    unet_init_args = config.model.params.unet_config.params

    vqvae = VQModel(**vqvae_init_args).eval()
    vqvae.load_state_dict(first_stage_dict)

    unet = UNetLDMModel(**unet_init_args).eval()
    unet.load_state_dict(unet_state_dict)

    scheduler = DDIMScheduler(
        num_train_timesteps=config.model.params.timesteps,
        beta_schedule="scaled_linear",
        beta_start=config.model.params.linear_start,
        beta_end=config.model.params.linear_end,
        clip_sample=False,
    )

    pipeline = LDMPipeline(vqvae, unet, scheduler)
    pipeline.save_pretrained(output_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--checkpoint_path", type=str, required=True)
    parser.add_argument("--config_path", type=str, required=True)
    parser.add_argument("--output_path", type=str, required=True)
    args = parser.parse_args()

    convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
295
from collections.abc import Sequence


def max_subarray_sum(arr: Sequence[float], allow_empty_subarrays: bool = False) -> float:
    """Kadane's algorithm: maximum sum over all contiguous subarrays of arr.

    >>> max_subarray_sum([-2, 1, -3, 4, -1, 2, 1, -5, 4])
    6
    """
    if not arr:
        return 0

    max_sum = 0 if allow_empty_subarrays else float("-inf")
    curr_sum = 0.0
    for num in arr:
        # either extend the running subarray or start afresh at num
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)

    return max_sum


if __name__ == "__main__":
    from doctest import testmod

    testmod()

    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
    print(f"{max_subarray_sum(nums) = }")
295
1
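A short usage sketch for the Kadane implementation above (assuming the restored max_subarray_sum name); the three cases cover the classic example and both settings of the empty-subarray flag:

assert max_subarray_sum([-2, 1, -3, 4, -1, 2, 1, -5, 4]) == 6  # subarray [4, -1, 2, 1]
assert max_subarray_sum([-1, -2]) == -1  # best non-empty subarray
assert max_subarray_sum([-1, -2], allow_empty_subarrays=True) == 0  # the empty subarray wins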
class Graph:  # Public class to implement a graph
    def __init__(self, row, col, graph):
        self.ROW = row
        self.COL = col
        self.graph = graph

    def is_safe(self, i, j, visited):
        # a cell can be stepped on if it lies inside the grid, is land (1)
        # and has not been visited yet
        return 0 <= i < self.ROW and 0 <= j < self.COL and not visited[i][j] and self.graph[i][j]

    def diffs(self, i, j, visited):
        # depth-first search, checking all 8 elements surrounding nth element
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make those cells visited
        for k in range(8):
            if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited):
                self.diffs(i + row_nbr[k], j + col_nbr[k], visited)

    def count_islands(self):  # And finally, count all islands.
        visited = [[False for j in range(self.COL)] for i in range(self.ROW)]
        count = 0
        for i in range(self.ROW):
            for j in range(self.COL):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(i, j, visited)
                    count += 1
        return count
295
import gc import inspect import unittest import torch from parameterized import parameterized from diffusers import PriorTransformer from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin enable_full_determinism() class A ( A_ , unittest.TestCase ): UpperCamelCase_ : Any =PriorTransformer UpperCamelCase_ : List[str] ='''hidden_states''' @property def _A (self ): __lowercase= 4 __lowercase= 8 __lowercase= 7 __lowercase= floats_tensor((batch_size, embedding_dim) ).to(lowerCAmelCase ) __lowercase= floats_tensor((batch_size, embedding_dim) ).to(lowerCAmelCase ) __lowercase= floats_tensor((batch_size, num_embeddings, embedding_dim) ).to(lowerCAmelCase ) return { "hidden_states": hidden_states, "timestep": 2, "proj_embedding": proj_embedding, "encoder_hidden_states": encoder_hidden_states, } def _A (self , lowerCAmelCase=0 ): torch.manual_seed(lowerCAmelCase ) __lowercase= 4 __lowercase= 8 __lowercase= 7 __lowercase= torch.randn((batch_size, embedding_dim) ).to(lowerCAmelCase ) __lowercase= torch.randn((batch_size, embedding_dim) ).to(lowerCAmelCase ) __lowercase= torch.randn((batch_size, num_embeddings, embedding_dim) ).to(lowerCAmelCase ) return { "hidden_states": hidden_states, "timestep": 2, "proj_embedding": proj_embedding, "encoder_hidden_states": encoder_hidden_states, } @property def _A (self ): return (4, 8) @property def _A (self ): return (4, 8) def _A (self ): __lowercase= { 'num_attention_heads': 2, 'attention_head_dim': 4, 'num_layers': 2, 'embedding_dim': 8, 'num_embeddings': 7, 'additional_embeddings': 4, } __lowercase= self.dummy_input return init_dict, inputs_dict def _A (self ): __lowercase, __lowercase= PriorTransformer.from_pretrained( 'hf-internal-testing/prior-dummy' , output_loading_info=lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) self.assertEqual(len(loading_info['missing_keys'] ) , 0 ) model.to(lowerCAmelCase ) __lowercase= model(**self.dummy_input )[0] assert hidden_states is not None, "Make sure output is not None" def _A (self ): __lowercase, __lowercase= self.prepare_init_args_and_inputs_for_common() __lowercase= self.model_class(**lowerCAmelCase ) __lowercase= inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __lowercase= [*signature.parameters.keys()] __lowercase= ['hidden_states', 'timestep'] self.assertListEqual(arg_names[:2] , lowerCAmelCase ) def _A (self ): __lowercase= PriorTransformer.from_pretrained('hf-internal-testing/prior-dummy' ) __lowercase= model.to(lowerCAmelCase ) if hasattr(lowerCAmelCase , 'set_default_attn_processor' ): model.set_default_attn_processor() __lowercase= self.get_dummy_seed_input() with torch.no_grad(): __lowercase= model(**lowerCAmelCase )[0] __lowercase= output[0, :5].flatten().cpu() print(lowerCAmelCase ) # Since the VAE Gaussian prior's generator is seeded on the appropriate device, # the expected output slices are not the same for CPU and GPU. 
__lowercase= torch.tensor([-1.34_36, -0.28_70, 0.75_38, 0.43_68, -0.02_39] ) self.assertTrue(torch_all_close(lowerCAmelCase , lowerCAmelCase , rtol=1E-2 ) ) @slow class A ( unittest.TestCase ): def _A (self , lowerCAmelCase=1 , lowerCAmelCase=7_6_8 , lowerCAmelCase=7_7 , lowerCAmelCase=0 ): torch.manual_seed(lowerCAmelCase ) __lowercase= batch_size __lowercase= embedding_dim __lowercase= num_embeddings __lowercase= torch.randn((batch_size, embedding_dim) ).to(lowerCAmelCase ) __lowercase= torch.randn((batch_size, embedding_dim) ).to(lowerCAmelCase ) __lowercase= torch.randn((batch_size, num_embeddings, embedding_dim) ).to(lowerCAmelCase ) return { "hidden_states": hidden_states, "timestep": 2, "proj_embedding": proj_embedding, "encoder_hidden_states": encoder_hidden_states, } def _A (self ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @parameterized.expand( [ # fmt: off [1_3, [-0.58_61, 0.12_83, -0.09_31, 0.08_82, 0.44_76, 0.13_29, -0.04_98, 0.06_40]], [3_7, [-0.49_13, 0.01_10, -0.04_83, 0.05_41, 0.49_54, -0.01_70, 0.03_54, 0.16_51]], # fmt: on ] ) def _A (self , lowerCAmelCase , lowerCAmelCase ): __lowercase= PriorTransformer.from_pretrained('kandinsky-community/kandinsky-2-1-prior' , subfolder='prior' ) model.to(lowerCAmelCase ) __lowercase= self.get_dummy_seed_input(seed=lowerCAmelCase ) with torch.no_grad(): __lowercase= model(**lowerCAmelCase )[0] assert list(sample.shape ) == [1, 7_6_8] __lowercase= sample[0, :8].flatten().cpu() print(lowerCAmelCase ) __lowercase= torch.tensor(lowerCAmelCase ) assert torch_all_close(lowerCAmelCase , lowerCAmelCase , atol=1E-3 )
295
1
import logging

import numpy as np
import pytest
from scipy.linalg import eigh

logging.basicConfig(level=logging.INFO, format="%(message)s")


def column_reshape(input_array: np.ndarray) -> np.ndarray:
    """Reshape a row Numpy array into a column Numpy array."""
    return input_array.reshape((input_array.size, 1))


def covariance_within_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Compute the covariance matrix inside each class."""
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        data_mean = data.mean(1)
        # Centralize the data of class i
        centered_data = data - column_reshape(data_mean)
        if i > 0:  # If covariance_sum is not None
            covariance_sum += np.dot(centered_data, centered_data.T)
        else:  # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = np.dot(centered_data, centered_data.T)
    return covariance_sum / features.shape[1]


def covariance_between_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Compute the covariance matrix between multiple classes."""
    general_data_mean = features.mean(1)
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        device_data = data.shape[1]
        data_mean = data.mean(1)
        if i > 0:  # If covariance_sum is not None
            covariance_sum += device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T,
            )
        else:  # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T,
            )
    return covariance_sum / features.shape[1]


def principal_component_analysis(features: np.ndarray, dimensions: int) -> np.ndarray:
    # Check if the features have been loaded
    if features.any():
        data_mean = features.mean(1)
        # Center the dataset
        centered_data = features - np.reshape(data_mean, (data_mean.size, 1))
        covariance_matrix = np.dot(centered_data, centered_data.T) / features.shape[1]
        eigenvalues, eigenvectors = np.linalg.eigh(covariance_matrix)
        # Take all the columns in the reverse order (-1), and then take only the first
        filtered_eigenvectors = eigenvectors[:, ::-1][:, 0:dimensions]
        # Project the database on the new space
        projected_data = np.dot(filtered_eigenvectors.T, centered_data)
        logging.info("Principal Component Analysis computed")
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True)
        logging.error("Dataset empty")
        raise AssertionError


def linear_discriminant_analysis(
    features: np.ndarray, labels: np.ndarray, classes: int, dimensions: int
) -> np.ndarray:
    # Check if the dimension desired is less than the number of classes
    assert classes > dimensions

    # Check if features have been already loaded
    if features.any():
        eigenvalues, eigenvectors = eigh(
            covariance_between_classes(features, labels, classes),
            covariance_within_classes(features, labels, classes),
        )
        filtered_eigenvectors = eigenvectors[:, ::-1][:, :dimensions]
        svd_matrix, _, _ = np.linalg.svd(filtered_eigenvectors)
        filtered_svd_matrix = svd_matrix[:, 0:dimensions]
        projected_data = np.dot(filtered_svd_matrix.T, features)
        logging.info("Linear Discriminant Analysis computed")
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True)
        logging.error("Dataset empty")
        raise AssertionError


def test_linear_discriminant_analysis() -> None:
    # Create dummy dataset with 2 classes and 3 features
    features = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]])
    labels = np.array([0, 0, 0, 1, 1])
    classes = 2
    dimensions = 2

    # Assert that the function raises an AssertionError if dimensions > classes
    with pytest.raises(AssertionError) as error_info:
        projected_data = linear_discriminant_analysis(features, labels, classes, dimensions)
        if isinstance(projected_data, np.ndarray):
            raise AssertionError("Did not raise AssertionError for dimensions > classes")
        assert error_info.type is AssertionError


def test_principal_component_analysis() -> None:
    features = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    dimensions = 2
    expected_output = np.array([[6.92820323, 8.66025404, 10.39230485], [3.0, 3.0, 3.0]])

    with pytest.raises(AssertionError) as error_info:
        output = principal_component_analysis(features, dimensions)
        if not np.allclose(expected_output, output):
            raise AssertionError
        assert error_info.type is AssertionError


if __name__ == "__main__":
    import doctest

    doctest.testmod()
295
def counting_sort(collection):
    """Pure implementation of counting sort algorithm in Python.

    >>> counting_sort([0, 5, 3, 2, 2])
    [0, 2, 2, 3, 5]
    """
    if collection == []:
        return []

    # get some information about the collection
    coll_len = len(collection)
    coll_max = max(collection)
    coll_min = min(collection)

    # create the counting array
    counting_arr_length = coll_max + 1 - coll_min
    counting_arr = [0] * counting_arr_length

    # count how much a number appears in the collection
    for number in collection:
        counting_arr[number - coll_min] += 1

    # sum each position with it's predecessors. now, counting_arr[i] tells
    # us how many elements <= i has in the collection
    for i in range(1, counting_arr_length):
        counting_arr[i] = counting_arr[i] + counting_arr[i - 1]

    # create the output collection
    ordered = [0] * coll_len

    # place the elements in the output, respecting the original order (stable
    # sort) from end to begin, updating counting_arr
    for i in reversed(range(0, coll_len)):
        ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i]
        counting_arr[collection[i] - coll_min] -= 1

    return ordered


def counting_sort_string(string):
    """
    >>> counting_sort_string("thisisthestring")
    'eghhiiinrsssttt'
    """
    return "".join([chr(i) for i in counting_sort([ord(c) for c in string])])


if __name__ == "__main__":
    # Test string sort
    assert counting_sort_string("thisisthestring") == "eghhiiinrsssttt"

    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(counting_sort(unsorted))
295
1
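A brief usage sketch for the counting sort above (assuming the restored counting_sort and counting_sort_string names); the coll_min offset also makes negative keys work:

assert counting_sort([0, 5, 3, 2, 2]) == [0, 2, 2, 3, 5]
assert counting_sort([-2, -5, -45]) == [-45, -5, -2]  # negative keys handled via the coll_min offset
assert counting_sort_string("thisisthestring") == "eghhiiinrsssttt"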
# Logistic Regression from scratch

# In[62]:

# In[63]:

# importing all the required libraries

import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets


def sigmoid_function(z):
    return 1 / (1 + np.exp(-z))


def cost_function(h, y):
    # binary cross-entropy loss
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()


def log_likelihood(x, y, weights):
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))


# here alpha is the learning rate, x is the feature matrix, y is the target matrix
def logistic_reg(alpha, x, y, max_iterations=70000):
    theta = np.zeros(x.shape[1])

    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f"loss: {j} \t")  # printing the loss after every 100 iterations
    return theta


# In[68]:

if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1

    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70000)
    print("theta: ", theta)  # printing the theta i.e our weights vector

    def predict_prob(x):
        return sigmoid_function(
            np.dot(x, theta)
        )  # predicting the value of probability from the logistic regression algorithm

    plt.figure(figsize=(10, 6))
    plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0")
    plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1")
    (x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
    (x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
    (xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
    grid = np.c_[xx1.ravel(), xx2.ravel()]
    probs = predict_prob(grid).reshape(xx1.shape)
    plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors="black")
    plt.legend()
    plt.show()
295
import os import tempfile import unittest from transformers import DistilBertConfig, is_torch_available from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, DistilBertModel, ) class A ( A_ ): def __init__(self , lowerCAmelCase , lowerCAmelCase=1_3 , lowerCAmelCase=7 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=False , lowerCAmelCase=True , lowerCAmelCase=9_9 , lowerCAmelCase=3_2 , lowerCAmelCase=5 , lowerCAmelCase=4 , lowerCAmelCase=3_7 , lowerCAmelCase="gelu" , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=5_1_2 , lowerCAmelCase=1_6 , lowerCAmelCase=2 , lowerCAmelCase=0.02 , lowerCAmelCase=3 , lowerCAmelCase=4 , lowerCAmelCase=None , ): __lowercase= parent __lowercase= batch_size __lowercase= seq_length __lowercase= is_training __lowercase= use_input_mask __lowercase= use_token_type_ids __lowercase= use_labels __lowercase= vocab_size __lowercase= hidden_size __lowercase= num_hidden_layers __lowercase= num_attention_heads __lowercase= intermediate_size __lowercase= hidden_act __lowercase= hidden_dropout_prob __lowercase= attention_probs_dropout_prob __lowercase= max_position_embeddings __lowercase= type_vocab_size __lowercase= type_sequence_label_size __lowercase= initializer_range __lowercase= num_labels __lowercase= num_choices __lowercase= scope def _A (self ): __lowercase= ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowercase= None if self.use_input_mask: __lowercase= random_attention_mask([self.batch_size, self.seq_length] ) __lowercase= None __lowercase= None __lowercase= None if self.use_labels: __lowercase= ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowercase= ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __lowercase= ids_tensor([self.batch_size] , self.num_choices ) __lowercase= self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def _A (self ): return DistilBertConfig( vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): __lowercase= DistilBertModel(config=lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= model(lowerCAmelCase , lowerCAmelCase ) __lowercase= model(lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): __lowercase= DistilBertForMaskedLM(config=lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= model(lowerCAmelCase , attention_mask=lowerCAmelCase , labels=lowerCAmelCase ) 
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): __lowercase= DistilBertForQuestionAnswering(config=lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= model( lowerCAmelCase , attention_mask=lowerCAmelCase , start_positions=lowerCAmelCase , end_positions=lowerCAmelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): __lowercase= self.num_labels __lowercase= DistilBertForSequenceClassification(lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= model(lowerCAmelCase , attention_mask=lowerCAmelCase , labels=lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): __lowercase= self.num_labels __lowercase= DistilBertForTokenClassification(config=lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= model(lowerCAmelCase , attention_mask=lowerCAmelCase , labels=lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): __lowercase= self.num_choices __lowercase= DistilBertForMultipleChoice(config=lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() __lowercase= input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __lowercase= input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __lowercase= model( lowerCAmelCase , attention_mask=lowerCAmelCase , labels=lowerCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def _A (self ): __lowercase= self.prepare_config_and_inputs() ((__lowercase), (__lowercase), (__lowercase), (__lowercase), (__lowercase), (__lowercase))= config_and_inputs __lowercase= {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class A ( A_ , A_ , unittest.TestCase ): UpperCamelCase_ : Any =( ( DistilBertModel, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, ) if is_torch_available() else None ) UpperCamelCase_ : Optional[int] =( { '''feature-extraction''': DistilBertModel, '''fill-mask''': DistilBertForMaskedLM, '''question-answering''': DistilBertForQuestionAnswering, '''text-classification''': DistilBertForSequenceClassification, '''token-classification''': DistilBertForTokenClassification, '''zero-shot''': DistilBertForSequenceClassification, } if is_torch_available() else {} ) UpperCamelCase_ : str =True UpperCamelCase_ : str =True UpperCamelCase_ : Union[str, Any] =True UpperCamelCase_ : Optional[int] =True def _A (self ): __lowercase= DistilBertModelTester(self ) __lowercase= ConfigTester(self , config_class=lowerCAmelCase , dim=3_7 ) def _A (self ): self.config_tester.run_common_tests() def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_model(*lowerCAmelCase ) def _A (self ): 
__lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_masked_lm(*lowerCAmelCase ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_question_answering(*lowerCAmelCase ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_sequence_classification(*lowerCAmelCase ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_token_classification(*lowerCAmelCase ) def _A (self ): __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_multiple_choice(*lowerCAmelCase ) @slow def _A (self ): for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowercase= DistilBertModel.from_pretrained(lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) @slow @require_torch_gpu def _A (self ): __lowercase, __lowercase= self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # BertForMultipleChoice behaves incorrectly in JIT environments. if model_class == DistilBertForMultipleChoice: return __lowercase= True __lowercase= model_class(config=lowerCAmelCase ) __lowercase= self._prepare_for_class(lowerCAmelCase , lowerCAmelCase ) __lowercase= torch.jit.trace( lowerCAmelCase , (inputs_dict['input_ids'].to('cpu' ), inputs_dict['attention_mask'].to('cpu' )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(lowerCAmelCase , os.path.join(lowerCAmelCase , 'traced_model.pt' ) ) __lowercase= torch.jit.load(os.path.join(lowerCAmelCase , 'traced_model.pt' ) , map_location=lowerCAmelCase ) loaded(inputs_dict['input_ids'].to(lowerCAmelCase ) , inputs_dict['attention_mask'].to(lowerCAmelCase ) ) @require_torch class A ( unittest.TestCase ): @slow def _A (self ): __lowercase= DistilBertModel.from_pretrained('distilbert-base-uncased' ) __lowercase= torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] ) __lowercase= torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): __lowercase= model(lowerCAmelCase , attention_mask=lowerCAmelCase )[0] __lowercase= torch.Size((1, 1_1, 7_6_8) ) self.assertEqual(output.shape , lowerCAmelCase ) __lowercase= torch.tensor( [[[-0.16_39, 0.32_99, 0.16_48], [-0.17_46, 0.32_89, 0.17_10], [-0.18_84, 0.33_57, 0.18_10]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , lowerCAmelCase , atol=1E-4 ) )
295
1
import unittest

from transformers import load_tool
from transformers.utils import is_torch_available


if is_torch_available():
    import torch

from transformers.testing_utils import require_torch

from .test_tools_common import ToolTesterMixin


@require_torch
class TextToSpeechToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-to-speech")
        self.tool.setup()

    def test_exact_match_arg(self):
        # SpeechT5 isn't deterministic
        torch.manual_seed(0)
        result = self.tool("hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]),
            )
        )

    def test_exact_match_kwarg(self):
        # SpeechT5 isn't deterministic
        torch.manual_seed(0)
        result = self.tool(text="hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]),
            )
        )
295
def bfs(graph, s, t, parent):
    """Return True if there is a path from source s to sink t in the residual
    graph; parent is filled in to record the path."""
    visited = [False] * len(graph)
    queue = []
    queue.append(s)
    visited[s] = True

    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u

    return visited[t]


def ford_fulkerson(graph, source, sink):
    # This array is filled by bfs to store the augmenting path
    parent = [-1] * (len(graph))
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink

        while s != source:
            # Find the minimum value in select path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]

        max_flow += path_flow
        v = sink

        # update the residual capacities along the path
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]

    return max_flow


graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]

source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
295
1
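A small, hand-checkable max-flow instance for the Ford-Fulkerson implementation above (a sketch, assuming the restored bfs and ford_fulkerson names; note that the function consumes its graph argument by turning it into a residual network):

tiny_network = [
    [0, 3, 2, 0],
    [0, 0, 5, 2],
    [0, 0, 0, 3],
    [0, 0, 0, 0],
]
assert ford_fulkerson(tiny_network, 0, 3) == 5  # the cut around the source has capacity 3 + 2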
from math import sqrt


def sum_of_divisors(n: int) -> int:
    """Return the sum of the proper divisors of n (divisors excluding n itself)."""
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n


def solution(n: int = 10000) -> int:
    """Return the sum of all amicable numbers under n."""
    total = sum(
        i
        for i in range(1, n)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i
    )
    return total


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
295
from __future__ import annotations


def kmp(pattern: str, text: str) -> bool:
    """Knuth-Morris-Pratt string matching in O(len(text) + len(pattern))."""
    # 1) Construct the failure array
    failure = get_failure_array(pattern)

    # 2) Step through text searching for pattern
    i, j = 0, 0  # index into text, pattern
    while i < len(text):
        if pattern[j] == text[i]:
            if j == (len(pattern) - 1):
                return True
            j += 1
        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False


def get_failure_array(pattern: str) -> list[int]:
    """Calculate the new index to resume from after a failed comparison."""
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i)
    return failure


if __name__ == "__main__":
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert kmp(pattern, text1) and not kmp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert kmp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert kmp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert kmp(pattern, text)

    # Test 5)
    pattern = "aabaabaaa"
    assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
295
1
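A brief illustration of the failure function that drives the KMP matcher above (assuming the restored get_failure_array and kmp names); failure[j] is the length of the longest proper prefix of pattern[: j + 1] that is also its suffix:

assert get_failure_array("aaab") == [0, 1, 2, 0]
assert kmp("needle", "haystack needle haystack")
assert not kmp("needle", "haystack")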
from itertools import permutations


def is_substring_divisible(num: tuple) -> bool:
    """Check the substring divisibility property of a 0-9 pandigital number."""
    # d2 d3 d4 is divisible by 2 iff d4 is even
    if num[3] % 2 != 0:
        return False

    # d3 d4 d5 is divisible by 3 iff its digit sum is
    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False

    # d4 d5 d6 is divisible by 5 iff d6 is 0 or 5
    if num[5] % 5 != 0:
        return False

    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True


def solution(n: int = 10) -> int:
    """Return the sum of all 0-9 pandigital numbers with the substring
    divisibility property."""
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num)
    )


if __name__ == "__main__":
    print(f"{solution() = }")
295
from typing import TYPE_CHECKING

from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable


_import_structure = {"configuration_gpt_neox": ["GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXConfig"]}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_gpt_neox_fast"] = ["GPTNeoXTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_gpt_neox"] = [
        "GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoXForCausalLM",
        "GPTNeoXForQuestionAnswering",
        "GPTNeoXForSequenceClassification",
        "GPTNeoXForTokenClassification",
        "GPTNeoXLayer",
        "GPTNeoXModel",
        "GPTNeoXPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_neox import (
            GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTNeoXForCausalLM,
            GPTNeoXForQuestionAnswering,
            GPTNeoXForSequenceClassification,
            GPTNeoXForTokenClassification,
            GPTNeoXLayer,
            GPTNeoXModel,
            GPTNeoXPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
295
1
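As a check on the substring-divisibility test above (assuming the restored is_substring_divisible name), the number 1406357289 quoted in the Project Euler 43 problem statement satisfies all seven divisibility conditions:

digits = tuple(int(d) for d in "1406357289")
assert is_substring_divisible(digits)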