Dataset schema (each row has five fields, in this order):

  code                     string  (87 to 55.2k characters)
  code_codestyle           int64   (0 to 349)
  style_context            string  (135 to 49.1k characters)
  style_context_codestyle  int64   (0 to 349)
  label                    int64   (0 to 1)
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

_model_names = [
    "small",
    "small-base",
    "medium",
    "medium-base",
    "intermediate",
    "intermediate-base",
    "large",
    "large-base",
    "xlarge",
    "xlarge-base",
]

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt",
        "funnel-transformer/small-base": "https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt",
        "funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt",
        "funnel-transformer/medium-base": "https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt",
        "funnel-transformer/intermediate": "https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt",
        "funnel-transformer/intermediate-base": "https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt",
        "funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt",
        "funnel-transformer/large-base": "https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt",
        "funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt",
        "funnel-transformer/xlarge-base": "https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt",
    },
    "tokenizer_file": {
        "funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json",
        "funnel-transformer/small-base": "https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json",
        "funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json",
        "funnel-transformer/medium-base": "https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json",
        "funnel-transformer/intermediate": "https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json",
        "funnel-transformer/intermediate-base": "https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json",
        "funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json",
        "funnel-transformer/large-base": "https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json",
        "funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json",
        "funnel-transformer/xlarge-base": "https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json",
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {f"funnel-transformer/{name}": 512 for name in _model_names}
PRETRAINED_INIT_CONFIGURATION = {f"funnel-transformer/{name}": {"do_lower_case": True} for name in _model_names}


class FunnelTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = FunnelTokenizer
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    cls_token_type_id: int = 2

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        bos_token="<s>",
        eos_token="</s>",
        clean_text=True,
        tokenize_chinese_chars=True,
        strip_accents=None,
        wordpieces_prefix="##",
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            bos_token=bos_token,
            eos_token=eos_token,
            clean_text=clean_text,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            wordpieces_prefix=wordpieces_prefix,
            **kwargs,
        )

        # Keep the backend normalizer in sync with the arguments passed here.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0]
        return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
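A minimal usage sketch for the class above. The checkpoint name comes from the pretrained map in this file; running it needs transformers installed and network access.

from transformers import FunnelTokenizerFast

tokenizer = FunnelTokenizerFast.from_pretrained("funnel-transformer/small")
encoded = tokenizer("Hello world", "How are you?")
# Funnel gives the leading <cls> its own token type id (2), per cls_token_type_id above.
print(encoded["token_type_ids"])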
code_codestyle: 9
import json
import sys
import tempfile
import unittest
from pathlib import Path

import transformers
from transformers import (
    CONFIG_MAPPING,
    FEATURE_EXTRACTOR_MAPPING,
    AutoConfig,
    AutoFeatureExtractor,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir


sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))

from test_module.custom_configuration import CustomConfig  # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor  # noqa E402


SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir("fixtures")
SAMPLE_FEATURE_EXTRACTION_CONFIG = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
SAMPLE_CONFIG = get_tests_dir("fixtures/dummy-config.json")


class AutoFeatureExtractorTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_feature_extractor_from_model_shortcut(self):
        config = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_directory_from_key(self):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config()

            # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
            config_dict = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR).to_dict()
            config_dict.pop("feature_extractor_type")
            config = Wav2Vec2FeatureExtractor(**config_dict)

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)

            config = AutoFeatureExtractor.from_pretrained(tmpdirname)

            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue("_processor_class" not in dict_as_saved)

        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_file(self):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG)
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoFeatureExtractor.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoFeatureExtractor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_feature_extractor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.",
        ):
            _ = AutoFeatureExtractor.from_pretrained("hf-internal-testing/config-no-model")

    def test_from_pretrained_dynamic_feature_extractor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor"
            )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=False
            )

        feature_extractor = AutoFeatureExtractor.from_pretrained(
            "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=True
        )
        self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")

        # Test feature extractor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(tmp_dir)
            reloaded_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_feature_extractor.__class__.__name__, "NewFeatureExtractor")

    def test_new_feature_extractor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoFeatureExtractor.register(Wav2Vec2Config, Wav2Vec2FeatureExtractor)

            # Now that the config is registered, it can be used as any other config with the auto-API
            feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)

            with tempfile.TemporaryDirectory() as tmp_dir:
                feature_extractor.save_pretrained(tmp_dir)
                new_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_feature_extractor, CustomFeatureExtractor)

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_feature_extractor_conflict(self):
        class NewFeatureExtractor(Wav2Vec2FeatureExtractor):
            is_local = True

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, NewFeatureExtractor)
            # If remote code is not set, the default is to use local
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor"
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(feature_extractor.is_local)

            # If remote code is disabled, we load the local one.
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=False
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(feature_extractor.is_local)

            # If remote is enabled, we load from the Hub
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=True
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(not hasattr(feature_extractor, "is_local"))

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
style_context_codestyle: 63
label: 0
"""simple docstring""" import unittest from transformers import ( MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TextaTextGenerationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, require_tf, require_torch from transformers.utils import is_torch_available from .test_pipelines_common import ANY if is_torch_available(): import torch @is_pipeline_test class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" __magic_name__ :int = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING __magic_name__ :Dict = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :Optional[Any] = TextaTextGenerationPipeline(model=__a , tokenizer=__a ) return generator, ["Something to write", "Something else"] def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :Tuple = generator('Something there' ) self.assertEqual(__a , [{'generated_text': ANY(__a )}] ) # These are encoder decoder, they don't just append to incoming string self.assertFalse(outputs[0]['generated_text'].startswith('Something there' ) ) lowerCAmelCase__ :Tuple = generator(['This is great !', 'Something else'] , num_return_sequences=2 , do_sample=__a ) self.assertEqual( __a , [ [{'generated_text': ANY(__a )}, {'generated_text': ANY(__a )}], [{'generated_text': ANY(__a )}, {'generated_text': ANY(__a )}], ] , ) lowerCAmelCase__ :Dict = generator( ['This is great !', 'Something else'] , num_return_sequences=2 , batch_size=2 , do_sample=__a ) self.assertEqual( __a , [ [{'generated_text': ANY(__a )}, {'generated_text': ANY(__a )}], [{'generated_text': ANY(__a )}, {'generated_text': ANY(__a )}], ] , ) with self.assertRaises(__a ): generator(4 ) @require_torch def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :str = pipeline('text2text-generation' , model='patrickvonplaten/t5-tiny-random' , framework='pt' ) # do_sample=False necessary for reproducibility lowerCAmelCase__ :Dict = generator('Something there' , do_sample=__a ) self.assertEqual(__a , [{'generated_text': ''}] ) lowerCAmelCase__ :str = 3 lowerCAmelCase__ :Union[str, Any] = generator( 'Something there' , num_return_sequences=__a , num_beams=__a , ) lowerCAmelCase__ :str = [ {'generated_text': 'Beide Beide Beide Beide Beide Beide Beide Beide Beide'}, {'generated_text': 'Beide Beide Beide Beide Beide Beide Beide Beide'}, {'generated_text': ''}, ] self.assertEqual(__a , __a ) lowerCAmelCase__ :Optional[Any] = generator('This is a test' , do_sample=__a , num_return_sequences=2 , return_tensors=__a ) self.assertEqual( __a , [ {'generated_token_ids': ANY(torch.Tensor )}, {'generated_token_ids': ANY(torch.Tensor )}, ] , ) lowerCAmelCase__ :int = generator.model.config.eos_token_id lowerCAmelCase__ :Dict = '<pad>' lowerCAmelCase__ :Optional[Any] = generator( ['This is a test', 'This is a second test'] , do_sample=__a , num_return_sequences=2 , batch_size=2 , return_tensors=__a , ) self.assertEqual( __a , [ [ {'generated_token_ids': ANY(torch.Tensor )}, {'generated_token_ids': ANY(torch.Tensor )}, ], [ {'generated_token_ids': ANY(torch.Tensor )}, {'generated_token_ids': ANY(torch.Tensor )}, ], ] , ) @require_tf def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :str = pipeline('text2text-generation' , model='patrickvonplaten/t5-tiny-random' , framework='tf' ) # do_sample=False necessary for reproducibility lowerCAmelCase__ :List[Any] = generator('Something 
there' , do_sample=__a ) self.assertEqual(__a , [{'generated_text': ''}] )
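A minimal sketch of the pipeline these tests exercise, using the same tiny checkpoint as test_small_model_pt (needs torch and network access):

from transformers import pipeline

generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="pt")
print(generator("Something there", do_sample=False))  # [{'generated_text': ''}] per the test above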
code_codestyle: 293
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "bigcode/gpt_bigcode-santacoder": "https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json",
}


class GPTBigCodeConfig(PretrainedConfig):
    model_type = "gpt_bigcode"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50257,
        n_positions=1024,
        n_embd=768,
        n_layer=12,
        n_head=12,
        n_inner=None,
        activation_function="gelu_pytorch_tanh",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        attention_softmax_in_fp32=True,
        scale_attention_softmax_in_fp32=True,
        multi_query=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
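A quick sketch of the attribute_map aliasing above, assuming the class is exposed as transformers.GPTBigCodeConfig:

from transformers import GPTBigCodeConfig

config = GPTBigCodeConfig()
# attribute_map lets the canonical names read the GPT-style fields:
assert config.hidden_size == config.n_embd == 768
assert config.num_hidden_layers == config.n_layer == 12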
style_context_codestyle: 63
label: 0
import argparse

import numpy as np
import torch

from transformers import SpeechT5HifiGan, SpeechT5HifiGanConfig, logging


logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")


def load_weights(checkpoint, hf_model, config):
    hf_model.apply_weight_norm()

    hf_model.conv_pre.weight_g.data = checkpoint["input_conv.weight_g"]
    hf_model.conv_pre.weight_v.data = checkpoint["input_conv.weight_v"]
    hf_model.conv_pre.bias.data = checkpoint["input_conv.bias"]

    for i in range(len(config.upsample_rates)):
        hf_model.upsampler[i].weight_g.data = checkpoint[f"upsamples.{i}.1.weight_g"]
        hf_model.upsampler[i].weight_v.data = checkpoint[f"upsamples.{i}.1.weight_v"]
        hf_model.upsampler[i].bias.data = checkpoint[f"upsamples.{i}.1.bias"]

    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"]
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"]
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"]

            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"]
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"]
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"]

    hf_model.conv_post.weight_g.data = checkpoint["output_conv.1.weight_g"]
    hf_model.conv_post.weight_v.data = checkpoint["output_conv.1.weight_v"]
    hf_model.conv_post.bias.data = checkpoint["output_conv.1.bias"]

    hf_model.remove_weight_norm()


@torch.no_grad()
def convert_hifigan_checkpoint(
    checkpoint_path,
    stats_path,
    pytorch_dump_folder_path,
    config_path=None,
    repo_id=None,
):
    if config_path is not None:
        config = SpeechT5HifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechT5HifiGanConfig()

    model = SpeechT5HifiGan(config)

    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint["model"]["generator"], model, config)

    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        model.push_to_hub(repo_id)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
    parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )

    args = parser.parse_args()
    convert_hifigan_checkpoint(
        args.checkpoint_path,
        args.stats_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.push_to_hub,
    )
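A sketch of calling the converter directly; the checkpoint and stats paths below are placeholders, and the script file name in the comment is an assumption:

# CLI equivalent:
#   python convert_hifigan.py --checkpoint_path generator.pt --stats_path stats.npy \
#       --pytorch_dump_folder_path ./speecht5_hifigan
convert_hifigan_checkpoint(
    checkpoint_path="generator.pt",
    stats_path="stats.npy",
    pytorch_dump_folder_path="./speecht5_hifigan",
)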
code_codestyle: 314
def perfect_cube(n: int) -> bool:
    # Round the floating-point cube root before cubing; comparing the raw
    # float makes e.g. perfect_cube(27) return False because 27 ** (1 / 3)
    # is slightly above 3.0 in binary floating point.
    val = round(abs(n) ** (1 / 3))
    return val * val * val == abs(n)


if __name__ == "__main__":
    print(perfect_cube(27))  # True
    print(perfect_cube(4))  # False
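The representation error that the rounding above guards against can be seen directly (exact digits may vary by platform):

val = 27 ** (1 / 3)
print(val)  # ~3.0000000000000004, not exactly 3.0
print(val * val * val == 27)  # False without rounding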
style_context_codestyle: 63
label: 0
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union

import numpy as np

from ...utils import is_tf_available, is_torch_available, logging


if TYPE_CHECKING:
    if is_torch_available():
        import torch
    if is_tf_available():
        import tensorflow as tf

from tokenizers import pre_tokenizers

from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json",
    },
    "merges_file": {
        "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "Salesforce/codegen-350M-mono": 2048,
}


class CodeGenTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CodeGenTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        if kwargs.pop("add_bos_token", False):
            model_id = kwargs.pop("name_or_path", "")
            raise ValueError(
                "Currently GPT2's fast tokenizer does NOT support adding a BOS token. "
                "Instead you should use GPT2's slow tokenizer class `CodeGenTokenizer` as follows: \n"
                f"`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n"
                f"`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n"
                "This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005."
                " so that the fast tokenizer works correctly."
            )

        # Keep the backend pre-tokenizer in sync with add_prefix_space.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def decode(
        self,
        token_ids: Union[int, List[int], "np.ndarray", "torch.Tensor", "tf.Tensor"],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = None,
        truncate_before_pattern: Optional[List[str]] = None,
        **kwargs,
    ) -> str:
        decoded_text = super().decode(
            token_ids=token_ids,
            skip_special_tokens=skip_special_tokens,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )

        if truncate_before_pattern is not None and len(truncate_before_pattern) > 0:
            decoded_text = self.truncate(decoded_text, truncate_before_pattern)

        return decoded_text

    def truncate(self, completion, truncate_before_pattern):
        def find_re(string, pattern, start_pos):
            m = pattern.search(string, start_pos)
            return m.start() if m else -1

        terminals = [re.compile(pattern, re.MULTILINE) for pattern in truncate_before_pattern]

        # Cut at the second top-level print statement, if any.
        prints = list(re.finditer("^print", completion, re.MULTILINE))
        if len(prints) > 1:
            completion = completion[: prints[1].start()]

        # Cut at the second top-level function definition, if any.
        defs = list(re.finditer("^def", completion, re.MULTILINE))
        if len(defs) > 1:
            completion = completion[: defs[1].start()]

        start_pos = 0

        terminals_pos = [
            pos for pos in [find_re(completion, terminal, start_pos) for terminal in terminals] if pos != -1
        ]

        if len(terminals_pos) > 0:
            return completion[: min(terminals_pos)]
        else:
            return completion
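A sketch of the truncation hook above (loading the checkpoint needs network access); note truncate() also trims at a second top-level print or def before applying the patterns:

from transformers import CodeGenTokenizerFast

tokenizer = CodeGenTokenizerFast.from_pretrained("Salesforce/codegen-350M-mono")
completion = "print('a')\nimport os\nprint('b')\n"
# First cut before the second top-level print, then before the first "^import" match:
print(tokenizer.truncate(completion, [r"^import"]))  # "print('a')\n"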
code_codestyle: 227
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)

DETA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "ut/deta": "https://huggingface.co/ut/deta/resolve/main/config.json",
}


class DetaConfig(PretrainedConfig):
    model_type = "deta"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        backbone_config=None,
        num_queries=900,
        max_position_embeddings=2048,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        num_feature_levels=5,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=True,
        two_stage_num_proposals=300,
        with_box_refine=True,
        assign_first_stage=True,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        **kwargs,
    ):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"])
        else:
            if isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.pop("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        self.assign_first_stage = assign_first_stage
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
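A small sketch of the default construction path above, assuming a transformers build that ships DetaConfig:

from transformers import DetaConfig

config = DetaConfig()  # no backbone_config: falls back to the default ResNet backbone
print(config.backbone_config.model_type)  # resnet
print(config.hidden_size)  # 256, the d_model value exposed via the hidden_size property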
style_context_codestyle: 63
label: 0
import argparse
import json
from pathlib import Path

import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform

from transformers import (
    BitConfig,
    ViTHybridConfig,
    ViTHybridForImageClassification,
    ViTHybridImageProcessor,
    ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def create_rename_keys(config, base_model=False):
    rename_keys = []

    # fmt: off
    # stem:
    rename_keys.append(("cls_token", "vit.embeddings.cls_token"))
    rename_keys.append(("pos_embed", "vit.embeddings.position_embeddings"))

    rename_keys.append(("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"))
    rename_keys.append(("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"))

    # backbone
    rename_keys.append(("patch_embed.backbone.stem.conv.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight"))
    rename_keys.append(("patch_embed.backbone.stem.norm.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight"))
    rename_keys.append(("patch_embed.backbone.stem.norm.bias", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias"))

    for stage_idx in range(len(config.backbone_config.depths)):
        for layer_idx in range(config.backbone_config.depths[stage_idx]):
            rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight"))
            rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight"))
            rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias"))
            rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight"))
            rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight"))
            rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias"))
            rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight"))
            rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight"))
            rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias"))

        rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight"))
        rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight"))
        rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias"))

    # transformer encoder
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias"))

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
                ("pre_logits.fc.weight", "pooler.dense.weight"),
                ("pre_logits.fc.bias", "pooler.dense.bias"),
            ]
        )
        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )
    # fmt: on

    return rename_keys


def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path, push_to_hub=False):
    # define default ViT hybrid configuration
    backbone_config = BitConfig(
        global_padding="same",
        layer_type="bottleneck",
        depths=(3, 4, 9),
        out_features=["stage3"],
        embedding_dynamic_padding=True,
    )
    config = ViTHybridConfig(backbone_config=backbone_config, image_size=384, num_labels=1000)
    base_model = False

    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTHybridModel(config).eval()
    else:
        model = ViTHybridForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    processor = ViTHybridImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print("Predicted class:", logits.argmax(-1).item())
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor to the hub {vit_name}")
        model.push_to_hub(f"ybelkada/{vit_name}")
        processor.push_to_hub(f"ybelkada/{vit_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--vit_name",
        default="vit_base_r50_s16_384",
        type=str,
        help="Name of the hybrid ViT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
    )

    args = parser.parse_args()
    convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
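A sketch of driving the converter directly with the default timm model name and a placeholder dump directory (the script file name in the comment is an assumption):

# CLI equivalent:
#   python convert_vit_hybrid_timm_to_pytorch.py --vit_name vit_base_r50_s16_384 \
#       --pytorch_dump_folder_path ./vit_hybrid
convert_vit_checkpoint("vit_base_r50_s16_384", "./vit_hybrid", push_to_hub=False)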
code_codestyle: 162
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import Seq2SeqDataset, pickle_save


def save_len_file(
    tokenizer_name, data_dir, max_source_length=1024, max_target_length=1024, consider_target=False, **kwargs
):
    tok = AutoTokenizer.from_pretrained(tokenizer_name)
    train_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="train", **kwargs)
    pad = tok.pad_token_id

    def get_lens(ds):
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn),
            desc=str(ds.len_file),
        )
        max_lens = []
        for batch in dl:
            # Count non-padding tokens on the source and target side.
            src_lens = batch["input_ids"].ne(pad).sum(1).tolist()
            tgt_lens = batch["labels"].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens

    train_lens = get_lens(train_ds)
    val_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="val", **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)


if __name__ == "__main__":
    fire.Fire(save_len_file)
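A usage sketch; this script lives alongside a project-local utils module (Seq2SeqDataset, pickle_save), so it must run from that directory, and the tokenizer name and data_dir below are placeholders:

# CLI equivalent (fire exposes the function's arguments as flags; script name assumed):
#   python save_len_file.py --tokenizer_name facebook/bart-large --data_dir ./cnn_dm
save_len_file("facebook/bart-large", "./cnn_dm")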
style_context_codestyle: 63
label: 0
from __future__ import annotations

import unittest

from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel


@require_tf
class TFBlenderbotModelTester:
    config_cls = BlenderbotConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFBlenderbotModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)


def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }


@require_tf
class TFBlenderbotModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
    all_generative_model_classes = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFBlenderbotForConditionalGeneration,
            "feature-extraction": TFBlenderbotModel,
            "summarization": TFBlenderbotForConditionalGeneration,
            "text2text-generation": TFBlenderbotForConditionalGeneration,
            "translation": TFBlenderbotForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFBlenderbotModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)


@require_tokenizers
@require_tf
class TFBlenderbot400MIntegrationTests(unittest.TestCase):
    src_text = ["My friends are cool but they eat too many carbs."]
    model_name = "facebook/blenderbot-400M-distill"

    @cached_property
    def tokenizer(self):
        return BlenderbotTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    @slow
    def test_generation_from_long_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
        assert (
            generated_words
            == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
        )
code_codestyle: 122
import unittest

from diffusers.pipelines.pipeline_utils import is_safetensors_compatible


class IsSafetensorsCompatibleTests(unittest.TestCase):
    def test_all_is_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_compatible(self):
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            # Removed: 'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_transformer_model_is_compatible(self):
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_transformer_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            # Removed: 'text_encoder/model.safetensors',
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_all_is_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant(self):
        filenames = [
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant(self):
        filenames = [
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            # 'text_encoder/model.fp16.safetensors',
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))
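A direct-call sketch of the helper the tests above exercise; it returns True when every torch .bin weight has a safetensors counterpart:

from diffusers.pipelines.pipeline_utils import is_safetensors_compatible

print(is_safetensors_compatible([
    "unet/diffusion_pytorch_model.bin",
    "unet/diffusion_pytorch_model.safetensors",
]))  # True
print(is_safetensors_compatible([
    "unet/diffusion_pytorch_model.bin",
]))  # False: the .bin file has no safetensors counterpart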
style_context_codestyle: 63
label: 0
from binascii import hexlify
from hashlib import sha256
from os import urandom

# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526

primes = {
    # 1536-bit
    5: {
        "prime": int(
            "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
            + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
            + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
            + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
            + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
            + "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
            + "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
            + "670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF",
            base=16,
        ),
        "generator": 2,
    },
    # 2048-bit
    14: {
        "prime": int(
            "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
            + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
            + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
            + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
            + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
            + "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
            + "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
            + "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
            + "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
            + "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
            + "15728E5A8AACAA68FFFFFFFFFFFFFFFF",
            base=16,
        ),
        "generator": 2,
    },
    # 3072-bit
    15: {
        "prime": int(
            "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
            + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
            + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
            + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
            + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
            + "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
            + "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
            + "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
            + "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
            + "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
            + "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
            + "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
            + "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
            + "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
            + "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
            + "43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF",
            base=16,
        ),
        "generator": 2,
    },
    # 4096-bit
    16: {
        "prime": int(
            "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
            + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
            + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
            + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
            + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
            + "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
            + "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
            + "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
            + "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
            + "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
            + "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
            + "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
            + "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
            + "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
            + "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
            + "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
            + "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
            + "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
            + "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
            + "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
            + "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199"
            + "FFFFFFFFFFFFFFFF",
            base=16,
        ),
        "generator": 2,
    },
    # 6144-bit
    17: {
        "prime": int(
            "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08"
            + "8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B"
            + "302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9"
            + "A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6"
            + "49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8"
            + "FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D"
            + "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C"
            + "180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718"
            + "3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D"
            + "04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D"
            + "B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226"
            + "1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
            + "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC"
            + "E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26"
            + "99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB"
            + "04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2"
            + "233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127"
            + "D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
            + "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406"
            + "AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918"
            + "DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151"
            + "2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03"
            + "F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F"
            + "BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
            + "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B"
            + "B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632"
            + "387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E"
            + "6DCC4024FFFFFFFFFFFFFFFF",
            base=16,
        ),
        "generator": 2,
    },
    # 8192-bit
    18: {
        "prime": int(
            "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
            + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
            + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
            + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
            + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
            + "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
            + "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
            + "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
            + "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
            + "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
            + "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
            + "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
            + "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
            + "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
            + "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
            + "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
            + "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
            + "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
            + "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
            + "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
            + "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
            + "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD"
            + "F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831"
            + "179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B"
            + "DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF"
            + "5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6"
            + "D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3"
            + "23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
            + "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328"
            + "06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C"
            + "DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE"
            + "12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4"
            + "38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300"
            + "741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568"
            + "3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9"
            + "22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B"
            + "4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A"
            + "062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36"
            + "4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1"
            + "B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92"
            + "4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47"
            + "9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71"
            + "60C980DD98EDD3DFFFFFFFFFFFFFFFFF",
            base=16,
        ),
        "generator": 2,
    },
}


class DiffieHellman:
    """Diffie-Hellman key exchange over an RFC 3526 MODP group."""

    def __init__(self, group: int = 14) -> None:
        if group not in primes:
            raise ValueError("Unsupported Group")
        self.prime = primes[group]["prime"]
        self.generator = primes[group]["generator"]
        self.__private_key = int(hexlify(urandom(32)), base=16)

    def get_private_key(self) -> str:
        return hex(self.__private_key)[2:]

    def generate_public_key(self) -> str:
        public_key = pow(self.generator, self.__private_key, self.prime)
        return hex(public_key)[2:]

    def is_valid_public_key(self, key: int) -> bool:
        # Check that the other party's public key is valid, per NIST SP800-56.
        return (
            2 <= key <= self.prime - 2
            and pow(key, (self.prime - 1) // 2, self.prime) == 1
        )

    def generate_shared_key(self, other_key_str: str) -> str:
        other_key = int(other_key_str, base=16)
        if not self.is_valid_public_key(other_key):
            raise ValueError("Invalid public key")
        shared_key = pow(other_key, self.__private_key, self.prime)
        return sha256(str(shared_key).encode()).hexdigest()

    @staticmethod
    def is_valid_public_key_static(remote_public_key: int, prime: int) -> bool:
        # Check that the other party's public key is valid, per NIST SP800-56.
        return (
            2 <= remote_public_key <= prime - 2
            and pow(remote_public_key, (prime - 1) // 2, prime) == 1
        )

    @staticmethod
    def generate_shared_key_static(
        local_private_key_str: str, remote_public_key_str: str, group: int = 14
    ) -> str:
        local_private_key = int(local_private_key_str, base=16)
        remote_public_key = int(remote_public_key_str, base=16)
        prime = primes[group]["prime"]
        if not DiffieHellman.is_valid_public_key_static(remote_public_key, prime):
            raise ValueError("Invalid public key")
        shared_key = pow(remote_public_key, local_private_key, prime)
        return sha256(str(shared_key).encode()).hexdigest()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
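if __name__ == "__main__":
    # Illustrative key exchange using the class above: both parties pick the
    # same MODP group, swap hex-encoded public keys, and derive identical
    # SHA-256 digests of the shared secret.
    alice = DiffieHellman(group=14)
    bob = DiffieHellman(group=14)
    alice_secret = alice.generate_shared_key(bob.generate_public_key())
    bob_secret = bob.generate_shared_key(alice.generate_public_key())
    assert alice_secret == bob_secret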
240
"""Base16 (hexadecimal) encoding and decoding, per RFC 3548."""


def base16_encode(data: bytes) -> str:
    # Turn each byte into its two-digit, uppercase hexadecimal representation.
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(data) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid:\nData does not have an even number of hex digits."
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            "Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters."
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
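if __name__ == "__main__":
    # Round-trip sketch for the two helpers above: the hex digits are just
    # the ASCII codes of the input bytes.
    encoded = base16_encode(b"Hello World!")
    print(encoded)                 # 48656C6C6F20576F726C6421
    print(base16_decode(encoded))  # b'Hello World!'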
63
0
"""simple docstring""" import json import multiprocessing as mp import re from collections import defaultdict from functools import partial from typing import Dict, List, Optional, Set, Tuple, Type from datasets import Dataset from datasketch import MinHash, MinHashLSH from dpu_utils.utils.iterators import ThreadedIterator from tqdm import tqdm _snake_case = re.compile('[^A-Za-z_0-9]') # parameters used in DuplicationIndex _snake_case = 10 _snake_case = 256 def lowerCAmelCase__ ( UpperCamelCase__ ): '''simple docstring''' if len(UpperCamelCase__ ) < MIN_NUM_TOKENS: return None _a : Dict = MinHash(num_perm=UpperCamelCase__ ) for token in set(UpperCamelCase__ ): min_hash.update(token.encode() ) return min_hash def lowerCAmelCase__ ( UpperCamelCase__ ): '''simple docstring''' return {t for t in NON_ALPHA.split(UpperCamelCase__ ) if len(t.strip() ) > 0} class UpperCamelCase : def __init__( self : Dict , *, UpperCAmelCase__ : float = 0.8_5 , ) -> Union[str, Any]: _a : Dict = duplication_jaccard_threshold _a : Dict = NUM_PERM _a : Any = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm ) _a : List[str] = defaultdict(__a ) def _lowercase ( self : Dict , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : MinHash ) -> Any: _a : Dict = self._index.query(__a ) if code_key in self._index.keys: print(f"""Duplicate key {code_key}""" ) return self._index.insert(__a , __a ) if len(__a ) > 0: for base_duplicate in close_duplicates: if base_duplicate in self._duplicate_clusters: self._duplicate_clusters[base_duplicate].add(__a ) break else: self._duplicate_clusters[close_duplicates[0]].add(__a ) def _lowercase ( self : str ) -> Dict: _a : Dict = [] for base, duplicates in self._duplicate_clusters.items(): _a : str = [base] + list(__a ) # reformat the cluster to be a list of dict _a : Optional[int] = [{"""base_index""": el[0], """repo_name""": el[1], """path""": el[2]} for el in cluster] duplicate_clusters.append(__a ) return duplicate_clusters def _lowercase ( self : Optional[int] , UpperCAmelCase__ : Any ) -> Union[str, Any]: _a : str = self.get_duplicate_clusters() with open(__a , """w""" ) as f: json.dump(__a , __a ) def lowerCAmelCase__ ( UpperCamelCase__ ): '''simple docstring''' _a , _a : Tuple = element _a : Dict = get_min_hash([t for t in NON_ALPHA.split(data["""content"""] ) if len(t.strip() ) > 0] ) if min_hash is not None: return (index, data["repo_name"], data["path"]), min_hash def lowerCAmelCase__ ( UpperCamelCase__ ): '''simple docstring''' with mp.Pool() as pool: for data in pool.imap_unordered( _compute_min_hash , ThreadedIterator(UpperCamelCase__ , max_queue_size=1_0_0_0_0 ) , chunksize=1_0_0 , ): if data is not None: yield data def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' _a : List[Any] = DuplicationIndex(duplication_jaccard_threshold=UpperCamelCase__ ) for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(UpperCamelCase__ ) ) , max_queue_size=1_0_0 ) ): di.add(UpperCamelCase__ , UpperCamelCase__ ) # Returns a List[Cluster] where Cluster is List[str] with the filenames. 
return di.get_duplicate_clusters() def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' _a : Tuple = get_tokens(UpperCamelCase__ ) _a : Any = get_tokens(UpperCamelCase__ ) return len(tokensa & tokensa ) / len(tokensa | tokensa ) _snake_case = None def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' _a : Any = [] for elementa in cluster: _a : Dict = _shared_dataset[elementa["""base_index"""]]["""content"""] for elementa in extremes: _a : Optional[int] = _shared_dataset[elementa["""base_index"""]]["""content"""] if jaccard_similarity(UpperCamelCase__ , UpperCamelCase__ ) >= jaccard_threshold: elementa["copies"] += 1 break else: _a : List[str] = 1 extremes.append(UpperCamelCase__ ) return extremes def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' global _shared_dataset _a : Dict = dataset _a : Optional[int] = [] _a : Optional[int] = partial(_find_cluster_extremes_shared , jaccard_threshold=UpperCamelCase__ ) with mp.Pool() as pool: for extremes in tqdm( pool.imap_unordered( UpperCamelCase__ , UpperCamelCase__ , ) , total=len(UpperCamelCase__ ) , ): extremes_list.append(UpperCamelCase__ ) return extremes_list def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ = 0.85 ): '''simple docstring''' _a : str = make_duplicate_clusters(UpperCamelCase__ , UpperCamelCase__ ) _a : Optional[Any] = {x["""base_index"""] for cluster in duplicate_clusters for x in cluster} _a : List[Any] = {} _a : List[str] = find_extremes(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) for extremes in extremes_clusters: for element in extremes: _a : List[str] = element _a : Tuple = duplicate_indices - set(extreme_dict.keys() ) _a : Tuple = dataset.filter(lambda UpperCamelCase__ , UpperCamelCase__ : idx not in remove_indices , with_indices=UpperCamelCase__ ) # update duplicate_clusters for cluster in duplicate_clusters: for element in cluster: _a : List[str] = element["""base_index"""] in extreme_dict if element["is_extreme"]: _a : List[Any] = extreme_dict[element["""base_index"""]]["""copies"""] print(F"""Original dataset size: {len(UpperCamelCase__ )}""" ) print(F"""Number of duplicate clusters: {len(UpperCamelCase__ )}""" ) print(F"""Files in duplicate cluster: {len(UpperCamelCase__ )}""" ) print(F"""Unique files in duplicate cluster: {len(UpperCamelCase__ )}""" ) print(F"""Filtered dataset size: {len(UpperCamelCase__ )}""" ) return ds_filter, duplicate_clusters
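if __name__ == "__main__":
    # Self-contained sketch of the MinHash similarity estimate the pipeline
    # above is built on (the two code snippets are made-up inputs; the local
    # tokenizer mirrors the alphanumeric split used above).
    _demo_non_alpha = re.compile(r"[^A-Za-z_0-9]")

    def _demo_minhash(code: str, num_perm: int = 256) -> MinHash:
        m = MinHash(num_perm=num_perm)
        for token in {t for t in _demo_non_alpha.split(code) if t.strip()}:
            m.update(token.encode())
        return m

    demo_a = _demo_minhash("def add(a, b): return a + b")
    demo_b = _demo_minhash("def add(x, y): return x + y")
    print(demo_a.jaccard(demo_b))  # estimated Jaccard similarity of the token sets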
294
'''simple docstring''' from copy import deepcopy import torch import torch.nn.functional as F from torch.optim import AdamW from torch.optim.lr_scheduler import LambdaLR from torch.utils.data import DataLoader from accelerate.accelerator import Accelerator from accelerate.state import GradientState from accelerate.test_utils import RegressionDataset, RegressionModel from accelerate.utils import DistributedType, is_torch_version, set_seed def _lowerCamelCase ( lowercase : Optional[Any] , lowercase : Optional[int] , lowercase : Optional[Any] , lowercase : Dict ) -> str: for param, grad_param in zip(model_a.parameters() , model_b.parameters() ): if not param.requires_grad: continue if not did_step: # Grads should not be in sync assert ( torch.allclose(param.grad , grad_param.grad ) is False ), F'Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})' else: # Grads should be in sync assert ( torch.allclose(param.grad , grad_param.grad ) is True ), F'Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})' def _lowerCamelCase ( lowercase : Optional[Any] , lowercase : int , lowercase : Tuple , lowercase : Optional[int] , lowercase : int=True ) -> Any: model.train() _a = model(lowercase ) _a = F.mse_loss(lowercase , target.to(output.device ) ) if not do_backward: loss /= accelerator.gradient_accumulation_steps loss.backward() else: accelerator.backward(lowercase ) def _lowerCamelCase ( lowercase : int , lowercase : Tuple=False ) -> List[str]: set_seed(42 ) _a = RegressionModel() _a = deepcopy(lowercase ) _a = RegressionDataset(length=80 ) _a = DataLoader(lowercase , batch_size=16 ) model.to(accelerator.device ) if sched: _a = AdamW(params=model.parameters() , lr=1E-3 ) _a = AdamW(params=ddp_model.parameters() , lr=1E-3 ) _a = LambdaLR(lowercase , lr_lambda=lambda lowercase : epoch**0.65 ) _a = LambdaLR(lowercase , lr_lambda=lambda lowercase : epoch**0.65 ) # Make a copy of `model` if sched: _a , _a , _a , _a = accelerator.prepare(lowercase , lowercase , lowercase , lowercase ) else: _a , _a = accelerator.prepare(lowercase , lowercase ) if sched: return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched) return model, ddp_model, dataloader def _lowerCamelCase ( lowercase : Optional[Any] ) -> Optional[int]: # Test when on a single CPU or GPU that the context manager does nothing _a , _a , _a = get_training_setup(lowercase ) # Use a single batch _a , _a = next(iter(lowercase ) ).values() for iteration in range(3 ): # Gather the distributed inputs and targs for the base model _a , _a = accelerator.gather((ddp_input, ddp_target) ) _a , _a = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(lowercase , lowercase , lowercase , lowercase ) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(lowercase ): step_model(lowercase , lowercase , lowercase , lowercase ) else: # Sync grads step_model(lowercase , lowercase , lowercase , lowercase ) # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync check_model_parameters(lowercase , lowercase , lowercase , lowercase ) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue assert torch.allclose( param.grad , ddp_param.grad ), F'Gradients not in sync when they should be:\nModel grad 
({param.grad}) != DDP grad ({ddp_param.grad})' # Shuffle ddp_input on each iteration torch.manual_seed(1337 + iteration ) _a = ddp_input[torch.randperm(len(lowercase ) )] def _lowerCamelCase ( lowercase : Tuple ) -> Tuple: # Test on distributed setup that context manager behaves properly _a , _a , _a = get_training_setup(lowercase ) # Use a single batch _a , _a = next(iter(lowercase ) ).values() for iteration in range(3 ): # Gather the distributed inputs and targs for the base model _a , _a = accelerator.gather((ddp_input, ddp_target) ) _a , _a = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(lowercase , lowercase , lowercase , lowercase ) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(lowercase ): step_model(lowercase , lowercase , lowercase , lowercase ) else: # Sync grads step_model(lowercase , lowercase , lowercase , lowercase ) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue if iteration % 2 == 0: # Grads should not be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is False ), F'Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})' else: # Grads should be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is True ), F'Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})' # Shuffle ddp_input on each iteration torch.manual_seed(1337 + iteration ) _a = ddp_input[torch.randperm(len(lowercase ) )] def _lowerCamelCase ( lowercase : List[Any]=False , lowercase : Optional[int]=False ) -> Any: _a = Accelerator( split_batches=lowercase , dispatch_batches=lowercase , gradient_accumulation_steps=2 ) # Test that context manager behaves properly _a , _a , _a = get_training_setup(lowercase ) for iteration, batch in enumerate(lowercase ): _a , _a = batch.values() # Gather the distributed inputs and targs for the base model _a , _a = accelerator.gather((ddp_input, ddp_target) ) _a , _a = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(lowercase , lowercase , lowercase , lowercase , lowercase ) # Do "gradient accumulation" (noop) with accelerator.accumulate(lowercase ): step_model(lowercase , lowercase , lowercase , lowercase ) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue if ((iteration + 1) % 2 == 0) or (iteration == len(lowercase ) - 1): # Grads should be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is True ), F'Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})' else: # Grads should not be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is False ), F'Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})' # Shuffle ddp_input on each iteration torch.manual_seed(1337 + iteration ) _a = ddp_input[torch.randperm(len(lowercase ) )] GradientState._reset_state() def _lowerCamelCase ( lowercase : int=False , lowercase : int=False ) -> Dict: _a = Accelerator( split_batches=lowercase , dispatch_batches=lowercase , 
gradient_accumulation_steps=2 ) # Test that context manager behaves properly _a , _a , _a , _a , _a , _a , _a = get_training_setup(lowercase , lowercase ) for iteration, batch in enumerate(lowercase ): _a , _a = batch.values() # Gather the distributed inputs and targs for the base model _a , _a = accelerator.gather((ddp_input, ddp_target) ) _a , _a = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" model.train() ddp_model.train() step_model(lowercase , lowercase , lowercase , lowercase , lowercase ) opt.step() if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(lowercase )): if split_batches: sched.step() else: for _ in range(accelerator.num_processes ): sched.step() opt.zero_grad() # Perform gradient accumulation under wrapper with accelerator.accumulate(lowercase ): step_model(lowercase , lowercase , lowercase , lowercase ) ddp_opt.step() ddp_sched.step() ddp_opt.zero_grad() # Learning rates should be the same assert ( opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"] ), F'Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n' _a = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(lowercase )) if accelerator.num_processes > 1: check_model_parameters(lowercase , lowercase , lowercase , lowercase ) # Shuffle ddp_input on each iteration torch.manual_seed(1337 + iteration ) GradientState._reset_state() def _lowerCamelCase ( ) -> Any: _a = Accelerator() _a = RegressionDataset(length=80 ) _a = DataLoader(lowercase , batch_size=16 ) _a = RegressionDataset(length=96 ) _a = DataLoader(lowercase , batch_size=16 ) _a , _a = accelerator.prepare(lowercase , lowercase ) assert accelerator.gradient_state.active_dataloader is None for iteration, _ in enumerate(lowercase ): assert id(accelerator.gradient_state.active_dataloader ) == id(lowercase ) if iteration < len(lowercase ) - 1: assert not accelerator.gradient_state.end_of_dataloader if iteration == 1: for batch_num, _ in enumerate(lowercase ): assert id(accelerator.gradient_state.active_dataloader ) == id(lowercase ) if batch_num < len(lowercase ) - 1: assert not accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader assert accelerator.gradient_state.active_dataloader is None def _lowerCamelCase ( ) -> Optional[Any]: _a = Accelerator() _a = accelerator.state if state.local_process_index == 0: print("**Test `accumulate` gradient accumulation with dataloader break**" ) test_dataloader_break() if state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print("**Test NOOP `no_sync` context manager**" ) test_noop_sync(lowercase ) if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU): if state.local_process_index == 0: print("**Test Distributed `no_sync` context manager**" ) test_distributed_sync(lowercase ) if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if state.local_process_index == 0: print( "**Test `accumulate` gradient accumulation, " , F'`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**' , ) test_gradient_accumulation(lowercase , lowercase ) # Currently will break on torch 2.0 +, need to investigate why if is_torch_version("<" , "2.0" ) or state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print( 
"**Test `accumulate` gradient accumulation with optimizer and scheduler, " , "`split_batches=False`, `dispatch_batches=False`**" , ) test_gradient_accumulation_with_opt_and_scheduler() if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if not split_batch and not dispatch_batches: continue if state.local_process_index == 0: print( "**Test `accumulate` gradient accumulation with optimizer and scheduler, " , F'`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**' , ) test_gradient_accumulation_with_opt_and_scheduler(lowercase , lowercase ) def _lowerCamelCase ( lowercase : Any ) -> Tuple: # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
63
0
def neville_interpolate(x_points: list, y_points: list, x0: int) -> list:
    """
    Interpolate and evaluate the polynomial through (x_points, y_points) at x0,
    using Neville's iterated interpolation scheme.

    Returns the interpolated value together with the full Neville table, whose
    last entry q[n - 1][n - 1] is that value.
    """
    n = len(x_points)
    q = [[0] * n for i in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]

    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (x0 - x_points[j - i + 1]) * q[j][i - 1]
                - (x0 - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])

    return [q[n - 1][n - 1], q]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
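if __name__ == "__main__":
    # Worked example: the five points below lie on the line y = x + 5, so
    # Neville's scheme must reproduce it exactly at x = 5.
    value, _table = neville_interpolate([1, 2, 3, 4, 6], [6, 7, 8, 9, 11], 5)
    print(value)  # 10.0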
68
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase_ : Optional[Any] = logging.get_logger(__name__) lowerCAmelCase_ : List[str] = { 'microsoft/trocr-base-handwritten': ( 'https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json' ), # See all TrOCR models at https://huggingface.co/models?filter=trocr } class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ): """simple docstring""" __a ='trocr' __a =['past_key_values'] __a ={ 'num_attention_heads': 'decoder_attention_heads', 'hidden_size': 'd_model', 'num_hidden_layers': 'decoder_layers', } def __init__( self : Optional[int] , __a : Any=5_02_65 , __a : Optional[int]=10_24 , __a : List[Any]=12 , __a : str=16 , __a : int=40_96 , __a : Optional[Any]="gelu" , __a : Union[str, Any]=5_12 , __a : Dict=0.1 , __a : List[str]=0.0 , __a : Union[str, Any]=0.0 , __a : Any=2 , __a : Union[str, Any]=0.02 , __a : Any=0.0 , __a : List[str]=True , __a : Optional[Any]=False , __a : Union[str, Any]=True , __a : Optional[Any]=True , __a : Any=1 , __a : List[Any]=0 , __a : Any=2 , **__a : Optional[Any] , ): _a = vocab_size _a = d_model _a = decoder_layers _a = decoder_attention_heads _a = decoder_ffn_dim _a = activation_function _a = max_position_embeddings _a = dropout _a = attention_dropout _a = activation_dropout _a = init_std _a = decoder_layerdrop _a = use_cache _a = scale_embedding _a = use_learned_position_embeddings _a = layernorm_embedding super().__init__( pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , decoder_start_token_id=__a , **__a , )
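if __name__ == "__main__":
    # Illustrative sketch (sizes are arbitrary): as with any PretrainedConfig,
    # the config builds a randomly initialised decoder, here via
    # TrOCRForCausalLM from the public transformers API.
    from transformers import TrOCRConfig, TrOCRForCausalLM

    demo_config = TrOCRConfig(d_model=256, decoder_layers=4, decoder_attention_heads=4)
    demo_model = TrOCRForCausalLM(demo_config)
    print(demo_model.config.d_model)  # 256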
63
0
import argparse from typing import List import evaluate import numpy as np import torch from datasets import DatasetDict, load_dataset # New Code # # We'll be using StratifiedKFold for this example from sklearn.model_selection import StratifiedKFold from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to perform Cross Validation, # and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To help focus on the differences in the code, building `DataLoaders` # was refactored into its own function. # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## _snake_case = 16 _snake_case = 32 def _UpperCamelCase ( snake_case__, snake_case__, snake_case__, snake_case__, snake_case__ = 16 ) -> Tuple: __UpperCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained("bert-base-cased" ) __UpperCAmelCase : Union[str, Any] = DatasetDict( { "train": dataset["train"].select(snake_case__ ), "validation": dataset["train"].select(snake_case__ ), "test": dataset["validation"], } ) def tokenize_function(snake_case__ ): # max_length=None => use the model max length (it's actually the default) __UpperCAmelCase : Dict = tokenizer(examples["sentence1"], examples["sentence2"], truncation=snake_case__, max_length=snake_case__ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): __UpperCAmelCase : Dict = datasets.map( snake_case__, batched=snake_case__, remove_columns=["idx", "sentence1", "sentence2"], ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library __UpperCAmelCase : List[str] = tokenized_datasets.rename_column("label", "labels" ) def collate_fn(snake_case__ ): # On TPU it's best to pad everything to the same length or training will be very slow. __UpperCAmelCase : Tuple = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": __UpperCAmelCase : List[Any] = 16 elif accelerator.mixed_precision != "no": __UpperCAmelCase : List[str] = 8 else: __UpperCAmelCase : int = None return tokenizer.pad( snake_case__, padding="longest", max_length=snake_case__, pad_to_multiple_of=snake_case__, return_tensors="pt", ) # Instantiate dataloaders. 
__UpperCAmelCase : Tuple = DataLoader( tokenized_datasets["train"], shuffle=snake_case__, collate_fn=snake_case__, batch_size=snake_case__ ) __UpperCAmelCase : int = DataLoader( tokenized_datasets["validation"], shuffle=snake_case__, collate_fn=snake_case__, batch_size=snake_case__ ) __UpperCAmelCase : Dict = DataLoader( tokenized_datasets["test"], shuffle=snake_case__, collate_fn=snake_case__, batch_size=snake_case__ ) return train_dataloader, eval_dataloader, test_dataloader def _UpperCamelCase ( snake_case__, snake_case__ ) -> Union[str, Any]: # New Code # __UpperCAmelCase : str = [] # Download the dataset __UpperCAmelCase : Tuple = load_dataset("glue", "mrpc" ) # Create our splits __UpperCAmelCase : List[str] = StratifiedKFold(n_splits=int(args.num_folds ) ) # Initialize accelerator __UpperCAmelCase : Tuple = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs __UpperCAmelCase : Union[str, Any] = config["lr"] __UpperCAmelCase : Optional[int] = int(config["num_epochs"] ) __UpperCAmelCase : Any = int(config["seed"] ) __UpperCAmelCase : List[str] = int(config["batch_size"] ) __UpperCAmelCase : Dict = evaluate.load("glue", "mrpc" ) # If the batch size is too big we use gradient accumulation __UpperCAmelCase : Optional[int] = 1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU: __UpperCAmelCase : str = batch_size // MAX_GPU_BATCH_SIZE __UpperCAmelCase : str = MAX_GPU_BATCH_SIZE set_seed(snake_case__ ) # New Code # # Create our folds: __UpperCAmelCase : int = kfold.split(np.zeros(datasets["train"].num_rows ), datasets["train"]["label"] ) __UpperCAmelCase : Optional[Any] = [] # Iterate over them for i, (train_idxs, valid_idxs) in enumerate(snake_case__ ): __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : List[str] = get_fold_dataloaders( snake_case__, snake_case__, snake_case__, snake_case__, ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) __UpperCAmelCase : Union[str, Any] = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=snake_case__ ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). __UpperCAmelCase : Any = model.to(accelerator.device ) # Instantiate optimizer __UpperCAmelCase : str = AdamW(params=model.parameters(), lr=snake_case__ ) # Instantiate scheduler __UpperCAmelCase : List[str] = get_linear_schedule_with_warmup( optimizer=snake_case__, num_warmup_steps=100, num_training_steps=(len(snake_case__ ) * num_epochs) // gradient_accumulation_steps, ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Optional[int] = accelerator.prepare( snake_case__, snake_case__, snake_case__, snake_case__, snake_case__ ) # Now we train the model for epoch in range(snake_case__ ): model.train() for step, batch in enumerate(snake_case__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) __UpperCAmelCase : Union[str, Any] = model(**snake_case__ ) __UpperCAmelCase : Union[str, Any] = outputs.loss __UpperCAmelCase : int = loss / gradient_accumulation_steps accelerator.backward(snake_case__ ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(snake_case__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): __UpperCAmelCase : List[str] = model(**snake_case__ ) __UpperCAmelCase : List[str] = outputs.logits.argmax(dim=-1 ) __UpperCAmelCase , __UpperCAmelCase : int = accelerator.gather_for_metrics((predictions, batch["labels"]) ) metric.add_batch( predictions=snake_case__, references=snake_case__, ) __UpperCAmelCase : Tuple = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f'''epoch {epoch}:''', snake_case__ ) # New Code # # We also run predictions on the test set at the very end __UpperCAmelCase : List[str] = [] for step, batch in enumerate(snake_case__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): __UpperCAmelCase : Union[str, Any] = model(**snake_case__ ) __UpperCAmelCase : str = outputs.logits __UpperCAmelCase , __UpperCAmelCase : Optional[Any] = accelerator.gather_for_metrics((predictions, batch["labels"]) ) fold_predictions.append(predictions.cpu() ) if i == 0: # We need all of the test predictions test_references.append(references.cpu() ) # Use accelerator.print to print only on the main process. test_predictions.append(torch.cat(snake_case__, dim=0 ) ) # We now need to release all our memory and get rid of the current model, optimizer, etc accelerator.free_memory() # New Code # # Finally we check the accuracy of our folded results: __UpperCAmelCase : Union[str, Any] = torch.cat(snake_case__, dim=0 ) __UpperCAmelCase : Tuple = torch.stack(snake_case__, dim=0 ).sum(dim=0 ).div(int(args.num_folds ) ).argmax(dim=-1 ) __UpperCAmelCase : Any = metric.compute(predictions=snake_case__, references=snake_case__ ) accelerator.print("Average test metrics from all folds:", snake_case__ ) def _UpperCamelCase ( ) -> Dict: __UpperCAmelCase : Tuple = argparse.ArgumentParser(description="Simple example of training script." ) parser.add_argument( "--mixed_precision", type=snake_case__, default=snake_case__, choices=["no", "fp16", "bf16", "fp8"], help="Whether to use mixed precision. Choose" "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10." "and an Nvidia Ampere GPU.", ) parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU." ) # New Code # parser.add_argument("--num_folds", type=snake_case__, default=3, help="The number of splits to perform across the dataset" ) __UpperCAmelCase : str = parser.parse_args() __UpperCAmelCase : List[Any] = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16} training_function(snake_case__, snake_case__ ) if __name__ == "__main__": main()
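if __name__ == "__main__":
    # Self-contained sketch of the stratified splitting the script relies on:
    # with toy 70/30 labels, every fold keeps roughly the same class balance
    # in both the train and validation splits.
    demo_labels = np.array([0] * 70 + [1] * 30)
    demo_features = np.zeros((100, 1))  # features are ignored by the splitter
    for train_idx, valid_idx in StratifiedKFold(n_splits=3).split(demo_features, demo_labels):
        print(len(train_idx), len(valid_idx), round(demo_labels[valid_idx].mean(), 2))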
157
"""Sorts the entries in the OrderedDict mappings of the Transformers auto modules."""
import argparse
import os
import re


PATH_TO_AUTO_MODULE = "src/transformers/models/auto"

# re pattern that matches mapping introductions:
#   SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
_re_intro_mapping = re.compile(r"[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict")
# re pattern that matches identifiers in mappings
_re_identifier = re.compile(r'\s*\(\s*"(\S[^"]+)"')


def sort_auto_mapping(fname: str, overwrite: bool = False):
    with open(fname, "r", encoding="utf-8") as f:
        content = f.read()

    lines = content.split("\n")
    new_lines = []
    line_idx = 0
    while line_idx < len(lines):
        if _re_intro_mapping.search(lines[line_idx]) is not None:
            indent = len(re.search(r"^(\s*)\S", lines[line_idx]).groups()[0]) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(" " * indent + "("):
                new_lines.append(lines[line_idx])
                line_idx += 1

            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(" " * indent + ")"):
                        line_idx += 1
                    blocks.append("\n".join(lines[start_idx : line_idx + 1]))
                else:
                    blocks.append(lines[line_idx])
                line_idx += 1

            # Sort blocks by their identifiers
            blocks = sorted(blocks, key=lambda x: _re_identifier.search(x).groups()[0])
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx])
            line_idx += 1

    if overwrite:
        with open(fname, "w", encoding="utf-8") as f:
            f.write("\n".join(new_lines))
    elif "\n".join(new_lines) != content:
        return True


def sort_all_auto_mappings(overwrite: bool = False):
    fnames = [os.path.join(PATH_TO_AUTO_MODULE, f) for f in os.listdir(PATH_TO_AUTO_MODULE) if f.endswith(".py")]
    diffs = [sort_auto_mapping(fname, overwrite=overwrite) for fname in fnames]

    if not overwrite and any(diffs):
        failures = [f for f, d in zip(fnames, diffs) if d]
        raise ValueError(
            f"The following files have auto mappings that need sorting: {', '.join(failures)}. Run `make style` to fix"
            " this."
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_all_auto_mappings(not args.check_only)
63
0
"""Convert a PyTorch Lightning Longformer QA checkpoint into a Hugging Face checkpoint."""
import argparse

import pytorch_lightning as pl
import torch
from torch import nn

from transformers import LongformerForQuestionAnswering, LongformerModel


class LightningModel(pl.LightningModule):
    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    # implement only because Lightning requires it; the weights are all we need
    def forward(self):
        pass


def convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model: str, longformer_question_answering_ckpt_path: str, pytorch_dump_folder_path: str
):
    # load longformer model from model identifier
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)

    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])

    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)

    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()

    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)

    print(f"Conversion successful. Model saved under {pytorch_dump_folder_path}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--longformer_model",
        default=None,
        type=str,
        required=True,
        help="model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.",
    )
    parser.add_argument(
        "--longformer_question_answering_ckpt_path",
        default=None,
        type=str,
        required=True,
        help="Path the official PyTorch Lightning Checkpoint.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_longformer_qa_checkpoint_to_pytorch(
        args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
    )
298
'''simple docstring''' from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCAmelCase_ : int = logging.get_logger(__name__) lowerCAmelCase_ : Tuple = { 'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json', 'google/bigbird-roberta-large': 'https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json', 'google/bigbird-base-trivia-itc': 'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json', # See all BigBird models at https://huggingface.co/models?filter=big_bird } class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ): """simple docstring""" __a ='big_bird' def __init__( self : Optional[int] , __a : Dict=5_03_58 , __a : str=7_68 , __a : List[Any]=12 , __a : List[str]=12 , __a : Union[str, Any]=30_72 , __a : str="gelu_new" , __a : Dict=0.1 , __a : Union[str, Any]=0.1 , __a : Any=40_96 , __a : int=2 , __a : Tuple=0.02 , __a : List[Any]=1e-1_2 , __a : int=True , __a : List[str]=0 , __a : Tuple=1 , __a : Optional[Any]=2 , __a : Tuple=66 , __a : str="block_sparse" , __a : Tuple=True , __a : Optional[int]=False , __a : str=64 , __a : Tuple=3 , __a : Any=None , **__a : Dict , ): super().__init__( pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , sep_token_id=__a , **__a , ) _a = vocab_size _a = max_position_embeddings _a = hidden_size _a = num_hidden_layers _a = num_attention_heads _a = intermediate_size _a = hidden_act _a = hidden_dropout_prob _a = attention_probs_dropout_prob _a = initializer_range _a = type_vocab_size _a = layer_norm_eps _a = use_cache _a = rescale_embeddings _a = attention_type _a = use_bias _a = block_size _a = num_random_blocks _a = classifier_dropout class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ): """simple docstring""" @property def UpperCamelCase__ ( self : Optional[int] ): if self.task == "multiple-choice": _a = {0: "batch", 1: "choice", 2: "sequence"} else: _a = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ] )
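if __name__ == "__main__":
    # Illustrative sketch of the sparse-attention knobs defined in the config
    # above, via the public transformers API.
    from transformers import BigBirdConfig

    # Default block-sparse attention with its tuning knobs ...
    sparse = BigBirdConfig(attention_type="block_sparse", block_size=64, num_random_blocks=3)
    # ... versus quadratic full attention for short sequences.
    full = BigBirdConfig(attention_type="original_full")
    print(sparse.block_size, full.attention_type)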
63
0
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer from ...utils import logging __lowerCAmelCase : int =logging.get_logger(__name__) __lowerCAmelCase : str ='▁' __lowerCAmelCase : Tuple ={'vocab_file': 'sentencepiece.bpe.model'} __lowerCAmelCase : Union[str, Any] ={ 'vocab_file': { 'facebook/mbart-large-50-one-to-many-mmt': ( 'https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model' ), } } __lowerCAmelCase : int ={ 'facebook/mbart-large-50-one-to-many-mmt': 1_0_2_4, } # fmt: off __lowerCAmelCase : Optional[Any] =['ar_AR', 'cs_CZ', 'de_DE', 'en_XX', 'es_XX', 'et_EE', 'fi_FI', 'fr_XX', 'gu_IN', 'hi_IN', 'it_IT', 'ja_XX', 'kk_KZ', 'ko_KR', 'lt_LT', 'lv_LV', 'my_MM', 'ne_NP', 'nl_XX', 'ro_RO', 'ru_RU', 'si_LK', 'tr_TR', 'vi_VN', 'zh_CN', 'af_ZA', 'az_AZ', 'bn_IN', 'fa_IR', 'he_IL', 'hr_HR', 'id_ID', 'ka_GE', 'km_KH', 'mk_MK', 'ml_IN', 'mn_MN', 'mr_IN', 'pl_PL', 'ps_AF', 'pt_XX', 'sv_SE', 'sw_KE', 'ta_IN', 'te_IN', 'th_TH', 'tl_XX', 'uk_UA', 'ur_PK', 'xh_ZA', 'gl_ES', 'sl_SI'] class _lowercase ( lowerCamelCase_ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Optional[Any] = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE__ : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE__ : str = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE__ : Dict = ['''input_ids''', '''attention_mask'''] SCREAMING_SNAKE_CASE__ : int = [] SCREAMING_SNAKE_CASE__ : List[Any] = [] def __init__( self :Optional[int] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Optional[Any]=None , lowerCAmelCase__ :Optional[int]=None , lowerCAmelCase__ :Tuple="</s>" , lowerCAmelCase__ :List[Any]="</s>" , lowerCAmelCase__ :Any="<s>" , lowerCAmelCase__ :int="<unk>" , lowerCAmelCase__ :Dict="<pad>" , lowerCAmelCase__ :Tuple="<mask>" , lowerCAmelCase__ :Optional[Dict[str, Any]] = None , **lowerCAmelCase__ :List[Any] , ) -> Union[str, Any]: # Mask token behave like a normal word, i.e. include the space before it __SCREAMING_SNAKE_CASE : Any = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else mask_token __SCREAMING_SNAKE_CASE : Dict = {} if sp_model_kwargs is None else sp_model_kwargs __SCREAMING_SNAKE_CASE : Tuple = kwargs.get('''additional_special_tokens''' , [] ) kwargs["additional_special_tokens"] += [ code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"] ] super().__init__( src_lang=__a , tgt_lang=__a , eos_token=__a , unk_token=__a , sep_token=__a , cls_token=__a , pad_token=__a , mask_token=__a , sp_model_kwargs=self.sp_model_kwargs , **__a , ) __SCREAMING_SNAKE_CASE : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(__a ) ) __SCREAMING_SNAKE_CASE : int = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # Mimic fairseq token-to-id alignment for the first 4 token __SCREAMING_SNAKE_CASE : Any = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3} # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab __SCREAMING_SNAKE_CASE : int = 1 __SCREAMING_SNAKE_CASE : int = len(self.sp_model ) __SCREAMING_SNAKE_CASE : Tuple = { code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(__a ) } __SCREAMING_SNAKE_CASE : List[str] = {v: k for k, v in self.lang_code_to_id.items()} __SCREAMING_SNAKE_CASE : Dict = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset self.fairseq_tokens_to_ids.update(self.lang_code_to_id ) __SCREAMING_SNAKE_CASE : Optional[int] = {v: k for k, v in self.fairseq_tokens_to_ids.items()} __SCREAMING_SNAKE_CASE : Any = src_lang if src_lang is not None else '''en_XX''' __SCREAMING_SNAKE_CASE : List[str] = self.lang_code_to_id[self._src_lang] __SCREAMING_SNAKE_CASE : List[str] = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) @property def __magic_name__( self :int ) -> Union[str, Any]: return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token @property def __magic_name__( self :Optional[int] ) -> List[str]: return self._src_lang @src_lang.setter def __magic_name__( self :Optional[Any] , lowerCAmelCase__ :str ) -> Any: __SCREAMING_SNAKE_CASE : List[str] = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def __getstate__( self :List[str] ) -> List[Any]: __SCREAMING_SNAKE_CASE : Union[str, Any] = self.__dict__.copy() __SCREAMING_SNAKE_CASE : Tuple = None return state def __setstate__( self :Union[str, Any] , lowerCAmelCase__ :Dict ) -> List[Any]: __SCREAMING_SNAKE_CASE : List[Any] = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): __SCREAMING_SNAKE_CASE : Dict = {} __SCREAMING_SNAKE_CASE : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def __magic_name__( self :List[str] ) -> Dict: __SCREAMING_SNAKE_CASE : Optional[int] = {self.convert_ids_to_tokens(__a ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __magic_name__( self :Tuple , lowerCAmelCase__ :str ) -> Tuple: return self.sp_model.encode(__a , out_type=__a ) def __magic_name__( self :Dict , lowerCAmelCase__ :str ) -> Optional[Any]: if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] __SCREAMING_SNAKE_CASE : Optional[Any] = self.sp_model.PieceToId(__a ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def __magic_name__( self :Tuple , lowerCAmelCase__ :int ) -> Any: if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def __magic_name__( self :Dict , lowerCAmelCase__ :Dict ) -> List[str]: __SCREAMING_SNAKE_CASE : Dict = [] __SCREAMING_SNAKE_CASE : Tuple = '''''' __SCREAMING_SNAKE_CASE : Any = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(__a ) + token __SCREAMING_SNAKE_CASE : List[Any] = True __SCREAMING_SNAKE_CASE : int = [] else: current_sub_tokens.append(__a ) __SCREAMING_SNAKE_CASE : int = False out_string += self.sp_model.decode(__a ) 
return out_string.strip() def __magic_name__( self :List[str] , lowerCAmelCase__ :str , lowerCAmelCase__ :Optional[str] = None ) -> Tuple: if not os.path.isdir(__a ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return __SCREAMING_SNAKE_CASE : Dict = os.path.join( __a , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__a ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __a ) elif not os.path.isfile(self.vocab_file ): with open(__a , '''wb''' ) as fi: __SCREAMING_SNAKE_CASE : List[str] = self.sp_model.serialized_model_proto() fi.write(__a ) return (out_vocab_file,) def __magic_name__( self :Optional[int] , lowerCAmelCase__ :List[int] , lowerCAmelCase__ :Optional[List[int]] = None , lowerCAmelCase__ :bool = False ) -> int: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__a , token_ids_a=__a , already_has_special_tokens=__a ) __SCREAMING_SNAKE_CASE : int = [1] * len(self.prefix_tokens ) __SCREAMING_SNAKE_CASE : str = [1] * len(self.suffix_tokens ) if token_ids_a is None: return prefix_ones + ([0] * len(__a )) + suffix_ones return prefix_ones + ([0] * len(__a )) + ([0] * len(__a )) + suffix_ones def __magic_name__( self :Dict , lowerCAmelCase__ :List[int] , lowerCAmelCase__ :Optional[List[int]] = None ) -> Optional[int]: if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def __magic_name__( self :str , lowerCAmelCase__ :str , lowerCAmelCase__ :str , lowerCAmelCase__ :Optional[str] , lowerCAmelCase__ :Optional[str] , **lowerCAmelCase__ :Tuple ) -> List[str]: if src_lang is None or tgt_lang is None: raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' ) __SCREAMING_SNAKE_CASE : List[Any] = src_lang __SCREAMING_SNAKE_CASE : List[str] = self(__a , add_special_tokens=__a , return_tensors=__a , **__a ) __SCREAMING_SNAKE_CASE : List[Any] = self.convert_tokens_to_ids(__a ) __SCREAMING_SNAKE_CASE : Optional[Any] = tgt_lang_id return inputs def __magic_name__( self :List[str] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :str = "en_XX" , lowerCAmelCase__ :Optional[List[str]] = None , lowerCAmelCase__ :str = "ro_RO" , **lowerCAmelCase__ :int , ) -> Tuple: __SCREAMING_SNAKE_CASE : str = src_lang __SCREAMING_SNAKE_CASE : Optional[Any] = tgt_lang return super().prepare_seqaseq_batch(__a , __a , **__a ) def __magic_name__( self :str ) -> Dict: return self.set_src_lang_special_tokens(self.src_lang ) def __magic_name__( self :int ) -> Dict: return self.set_tgt_lang_special_tokens(self.tgt_lang ) def __magic_name__( self :List[Any] , lowerCAmelCase__ :str ) -> Tuple: __SCREAMING_SNAKE_CASE : Tuple = self.lang_code_to_id[src_lang] __SCREAMING_SNAKE_CASE : List[Any] = [self.cur_lang_code_id] __SCREAMING_SNAKE_CASE : Any = [self.eos_token_id] def __magic_name__( self :Optional[Any] , lowerCAmelCase__ :str ) -> List[str]: __SCREAMING_SNAKE_CASE : str = self.lang_code_to_id[tgt_lang] __SCREAMING_SNAKE_CASE : Union[str, Any] = [self.cur_lang_code_id] __SCREAMING_SNAKE_CASE : str = [self.eos_token_id]
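if __name__ == "__main__":
    # Usage sketch of the language-code machinery above, via the public
    # transformers API (downloads the checkpoint named in the pretrained map
    # at the top of the file).
    from transformers import MBart50Tokenizer

    demo_tok = MBart50Tokenizer.from_pretrained(
        "facebook/mbart-large-50-one-to-many-mmt", src_lang="en_XX", tgt_lang="ro_RO"
    )
    demo_batch = demo_tok("Hello, world!", return_tensors="pt")
    # input_ids start with the en_XX language code and end with </s>,
    # exactly as set_src_lang_special_tokens arranges.
    print(demo_batch["input_ids"])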
9
'''simple docstring''' import torch from torch import nn from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin class __SCREAMING_SNAKE_CASE (lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" @register_to_config def __init__( self : Dict , *, __a : int = 4 , __a : int = 7_68 , __a : int , __a : int , ): super().__init__() _a = nn.Parameter(torch.zeros(__a ) ) # parameters for additional clip time embeddings _a = nn.Linear(__a , __a ) _a = nn.Linear(__a , __a ) # parameters for encoder hidden states _a = clip_extra_context_tokens _a = nn.Linear( __a , self.clip_extra_context_tokens * cross_attention_dim ) _a = nn.Linear(__a , __a ) _a = nn.LayerNorm(__a ) def UpperCamelCase__ ( self : Optional[Any] , *, __a : Tuple , __a : Union[str, Any] , __a : Any , __a : List[Any] ): if do_classifier_free_guidance: # Add the classifier free guidance embeddings to the image embeddings _a = image_embeddings.shape[0] _a = self.learned_classifier_free_guidance_embeddings.unsqueeze(0 ) _a = classifier_free_guidance_embeddings.expand( __a , -1 ) _a = torch.cat([classifier_free_guidance_embeddings, image_embeddings] , dim=0 ) # The image embeddings batch size and the text embeddings batch size are equal assert image_embeddings.shape[0] == prompt_embeds.shape[0] _a = prompt_embeds.shape[0] # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and # adding CLIP embeddings to the existing timestep embedding, ... _a = self.embedding_proj(__a ) _a = self.clip_image_embeddings_project_to_time_embeddings(__a ) _a = time_projected_image_embeddings + time_projected_prompt_embeds # ... and by projecting CLIP embeddings into four # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder" _a = self.clip_extra_context_tokens_proj(__a ) _a = clip_extra_context_tokens.reshape(__a , -1 , self.clip_extra_context_tokens ) _a = clip_extra_context_tokens.permute(0 , 2 , 1 ) _a = self.encoder_hidden_states_proj(__a ) _a = self.text_encoder_hidden_states_norm(__a ) _a = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states] , dim=1 ) return text_encoder_hidden_states, additive_clip_time_embeddings
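if __name__ == "__main__":
    # Shape walk-through for the extra-context-token projection above, with
    # hypothetical sizes: 2 images, 768-dim CLIP embeddings, 4 extra tokens,
    # cross-attention dim 1280.
    batch, clip_dim, n_tokens, cross_dim = 2, 768, 4, 1280
    demo_proj = nn.Linear(clip_dim, n_tokens * cross_dim)
    demo_tokens = demo_proj(torch.randn(batch, clip_dim))   # (2, 5120)
    demo_tokens = demo_tokens.reshape(batch, -1, n_tokens)  # (2, 1280, 4)
    demo_tokens = demo_tokens.permute(0, 2, 1)              # (2, 4, 1280)
    print(demo_tokens.shape)  # torch.Size([2, 4, 1280])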
63
0
"""simple docstring""" from __future__ import annotations def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->bool: """simple docstring""" if len(_SCREAMING_SNAKE_CASE ) == 0: return False lowerCAmelCase__ :int = len(_SCREAMING_SNAKE_CASE ) // 2 if a_list[midpoint] == item: return True if item < a_list[midpoint]: return binary_search(a_list[:midpoint] , _SCREAMING_SNAKE_CASE ) else: return binary_search(a_list[midpoint + 1 :] , _SCREAMING_SNAKE_CASE ) if __name__ == "__main__": __A = input("""Enter numbers separated by comma:\n""").strip() __A = [int(item.strip()) for item in user_input.split(""",""")] __A = int(input("""Enter the number to be found in the list:\n""").strip()) __A = '' if binary_search(sequence, target) else 'not ' print(F'''{target} was {not_str}found in {sequence}''')
293
'''simple docstring''' import logging from pathlib import Path import numpy as np import pytorch_lightning as pl import torch from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint from pytorch_lightning.utilities import rank_zero_only from utils_rag import save_json def _lowerCamelCase ( lowercase : Dict ) -> Any: _a = filter(lambda lowercase : p.requires_grad , model.parameters() ) _a = sum([np.prod(p.size() ) for p in model_parameters] ) return params lowerCAmelCase_ : int = logging.getLogger(__name__) def _lowerCamelCase ( lowercase : List[Any] , lowercase : Any ) -> Any: if metric == "rouge2": _a = "{val_avg_rouge2:.4f}-{step_count}" elif metric == "bleu": _a = "{val_avg_bleu:.4f}-{step_count}" elif metric == "em": _a = "{val_avg_em:.4f}-{step_count}" else: raise NotImplementedError( F'seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this' " function." ) _a = ModelCheckpoint( dirpath=lowercase , filename=lowercase , monitor=F'val_{metric}' , mode="max" , save_top_k=3 , every_n_epochs=1 , ) return checkpoint_callback def _lowerCamelCase ( lowercase : Optional[int] , lowercase : Optional[int] ) -> Union[str, Any]: return EarlyStopping( monitor=F'val_{metric}' , mode="min" if "loss" in metric else "max" , patience=lowercase , verbose=lowercase , ) class __SCREAMING_SNAKE_CASE (pl.Callback ): """simple docstring""" def UpperCamelCase__ ( self : Optional[int] , __a : str , __a : List[Any] ): _a = {f'lr_group_{i}': param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )} pl_module.logger.log_metrics(__a ) @rank_zero_only def UpperCamelCase__ ( self : Optional[int] , __a : pl.Trainer , __a : pl.LightningModule , __a : str , __a : Tuple=True ): logger.info(f'***** {type_path} results at step {trainer.global_step:05d} *****' ) _a = trainer.callback_metrics trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]} ) # Log results _a = Path(pl_module.hparams.output_dir ) if type_path == "test": _a = od / "test_results.txt" _a = od / "test_generations.txt" else: # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json # If people want this it will be easy enough to add back. 
_a = od / f'{type_path}_results/{trainer.global_step:05d}.txt' _a = od / f'{type_path}_generations/{trainer.global_step:05d}.txt' results_file.parent.mkdir(exist_ok=__a ) generations_file.parent.mkdir(exist_ok=__a ) with open(__a , "a+" ) as writer: for key in sorted(__a ): if key in ["log", "progress_bar", "preds"]: continue _a = metrics[key] if isinstance(__a , torch.Tensor ): _a = val.item() _a = f'{key}: {val:.6f}\n' writer.write(__a ) if not save_generations: return if "preds" in metrics: _a = "\n".join(metrics["preds"] ) generations_file.open("w+" ).write(__a ) @rank_zero_only def UpperCamelCase__ ( self : int , __a : List[Any] , __a : Union[str, Any] ): try: _a = pl_module.model.model.num_parameters() except AttributeError: _a = pl_module.model.num_parameters() _a = count_trainable_parameters(__a ) # mp stands for million parameters trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6} ) @rank_zero_only def UpperCamelCase__ ( self : Union[str, Any] , __a : pl.Trainer , __a : pl.LightningModule ): save_json(pl_module.metrics , pl_module.metrics_save_path ) return self._write_logs(__a , __a , "test" ) @rank_zero_only def UpperCamelCase__ ( self : Any , __a : pl.Trainer , __a : int ): save_json(pl_module.metrics , pl_module.metrics_save_path ) # Uncommenting this will save val generations # return self._write_logs(trainer, pl_module, "valid")
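if __name__ == "__main__":
    # Minimal wiring sketch (monitor key and paths are illustrative): the
    # callbacks defined above only take effect once handed to a Trainer.
    demo_trainer = pl.Trainer(
        callbacks=[
            ModelCheckpoint(dirpath="out", monitor="val_rouge2", mode="max", save_top_k=3),
            EarlyStopping(monitor="val_rouge2", mode="max", patience=3),
        ],
        max_epochs=3,
    )
    # demo_trainer.fit(module) would then train with checkpointing and early stopping.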
63
0
import gc
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    EulerAncestralDiscreteScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    StableDiffusionPanoramaPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin


enable_full_determinism()


@skip_mps
class StableDiffusionPanoramaPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionPanoramaPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=1,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler()
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "a photo of the dolomites",
            "generator": generator,
            # Setting height and width to None to prevent OOMs on CPU.
            "height": None,
            "width": None,
            "num_inference_steps": 1,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_panorama_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6186, 0.5374, 0.4915, 0.4135, 0.4114, 0.4563, 0.5128, 0.4977, 0.4757])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_consistent(self):
        super().test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(batch_size=2, expected_max_diff=3.25e-3)

    def test_stable_diffusion_panorama_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = sd_pipe(**inputs, negative_prompt=negative_prompt)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_panorama_views_batch(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = sd_pipe(**inputs, view_batch_size=2)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_panorama_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = EulerAncestralDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear"
        )
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.4024, 0.6510, 0.4901, 0.5378, 0.5813, 0.5622, 0.4795, 0.4467, 0.4952])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_panorama_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", skip_prk_steps=True
        )
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6391, 0.6291, 0.4861, 0.5134, 0.5552, 0.4578, 0.5032, 0.5023, 0.4539])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch_gpu
class StableDiffusionPanoramaNightlyTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0):
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "a photo of the dolomites",
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_panorama_default(self):
        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 2048, 3)

        expected_slice = np.array(
            [
                0.36968392,
                0.27025372,
                0.32446766,
                0.28379387,
                0.36363274,
                0.30733347,
                0.27100027,
                0.27054125,
                0.25536096,
            ]
        )
        assert np.abs(expected_slice - image_slice).max() < 1e-2

    def test_stable_diffusion_panorama_k_lms(self):
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-base", safety_checker=None
        )
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 2048, 3)

        expected_slice = np.array([[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]])
        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def test_stable_diffusion_panorama_intermediate_state(self):
        number_of_steps = 0

        def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None:
            callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 256)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [
                        0.18681869,
                        0.33907816,
                        0.5361276,
                        0.14432865,
                        -0.02856611,
                        -0.73941123,
                        0.23397987,
                        0.47322682,
                        -0.37823164,
                    ]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
            elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 256)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [
                        0.18539645,
                        0.33987248,
                        0.5378559,
                        0.14437142,
                        -0.02455261,
                        -0.7338317,
                        0.23990755,
                        0.47356272,
                        -0.3786505,
                    ]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2

        callback_fn.has_been_called = False

        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        pipe(**inputs, callback=callback_fn, callback_steps=1)
        assert callback_fn.has_been_called
        assert number_of_steps == 3

    def test_stable_diffusion_panorama_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        inputs = self.get_inputs()
        _ = pipe(**inputs)

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 5.5 GB is allocated
        assert mem_bytes < 5.5 * 10**9
314
import math


class SelfOrganizingMap:
    def get_winner(self, weights: list[list[float]], sample: list[int]) -> int:
        """Compute the winning (closest) weight vector for a sample."""
        d0 = 0.0
        d1 = 0.0
        for i in range(len(sample)):
            d0 += math.pow((sample[i] - weights[0][i]), 2)
            d1 += math.pow((sample[i] - weights[1][i]), 2)
        # the winner is the weight vector with the smaller distance to the sample
        return 1 if d0 > d1 else 0

    def update(self, weights: list[list[int | float]], sample: list[int], j: int, alpha: float) -> list[list[int | float]]:
        """Move the winning vector `j` towards the sample by learning rate `alpha`."""
        for i in range(len(sample)):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights


def main() -> None:
    # Training Examples ( m, n )
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]

    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]

    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5

    for _ in range(epochs):
        for j in range(len(training_samples)):
            # training sample
            sample = training_samples[j]
            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights, sample)
            # Update the winning vector
            weights = self_organizing_map.update(weights, sample, winner, alpha)

    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights, sample)

    # results
    print(f"Clusters that the test sample belongs to : {winner}")
    print(f"Weights that have been trained : {weights}")


# running the main() function
if __name__ == "__main__":
    main()
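# Added usage sketch (not part of the original module): `get_winner` returns
# the index of the weight vector closest to the sample.
if __name__ == "__main__":
    som = SelfOrganizingMap()
    # squared distances: 2.0 to [0.0, 0.0] and 0.0 to [1.0, 1.0] -> winner is 1
    assert som.get_winner([[0.0, 0.0], [1.0, 1.0]], [1, 1]) == 1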
63
0
from pathlib import Path

import cv2
import numpy as np
from matplotlib import pyplot as plt


def get_rotation(img: np.ndarray, pt1: np.ndarray, pt2: np.ndarray, rows: int, cols: int) -> np.ndarray:
    """Rotate the image by the affine transform that maps the points pt1 onto pt2."""
    matrix = cv2.getAffineTransform(pt1, pt2)
    return cv2.warpAffine(img, matrix, (rows, cols))


if __name__ == "__main__":
    # read original image
    image = cv2.imread(str(Path(__file__).resolve().parent.parent / "image_data" / "lena.jpg"))
    # turn image into gray scale values
    gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # get image shape
    img_rows, img_cols = gray_img.shape
    # set different points to rotate image
    pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32)
    # add all rotated images in a list
    images = [
        gray_img,
        get_rotation(gray_img, pts1, pts2, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts3, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts4, img_rows, img_cols),
    ]
    # plot different image rotations
    fig = plt.figure(1)
    titles = ["Original", "Rotation 1", "Rotation 2", "Rotation 3"]
    for i, img in enumerate(images):
        plt.subplot(2, 2, i + 1), plt.imshow(img, "gray")
        plt.title(titles[i])
        plt.axis("off")
    plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
    plt.show()
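# Added illustrative check (assumes OpenCV is installed; not part of the original
# script): mapping a triangle onto itself yields the identity affine matrix.
if __name__ == "__main__":
    pts_check = np.array([[0, 0], [1, 0], [0, 1]], np.float32)
    identity = cv2.getAffineTransform(pts_check, pts_check)
    assert np.allclose(identity, np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]]))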
227
import warnings
from typing import List

import numpy as np

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available


class OwlViTProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, query_images=None, padding="max_length", return_tensors="np", **kwargs):
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none."
            )

        if text is not None:
            if isinstance(text, str) or (isinstance(text, list) and not isinstance(text[0], list)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]
            elif isinstance(text, list) and isinstance(text[0], list):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))

                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)
            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)
            else:
                raise ValueError("Target return tensor type could not be returned")

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs
            ).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def post_process(self, *args, **kwargs):
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
63
0
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


_import_structure = {
    "configuration_falcon": ["FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP", "FalconConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_falcon"] = [
        "FALCON_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FalconForCausalLM",
        "FalconModel",
        "FalconPreTrainedModel",
        "FalconForSequenceClassification",
        "FalconForTokenClassification",
        "FalconForQuestionAnswering",
    ]

if TYPE_CHECKING:
    from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_falcon import (
            FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
            FalconForCausalLM,
            FalconForQuestionAnswering,
            FalconForSequenceClassification,
            FalconForTokenClassification,
            FalconModel,
            FalconPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
162
def harmonic_series(n_term: str) -> list:
    """Return the terms of the harmonic series 1, 1/2, 1/3, ... up to the n-th term."""
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series


if __name__ == "__main__":
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
    print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
    print(harmonic_series(nth_term))
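# Added illustrative check (not part of the original script): the first three
# terms of the harmonic series are 1, 1/2 and 1/3.
if __name__ == "__main__":
    assert harmonic_series("3") == ["1", "1/2", "1/3"]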
63
0
import torch

from diffusers import KDPM2DiscreteScheduler
from diffusers.utils import torch_device

from .test_schedulers import SchedulerCommonTest


class KDPM2DiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (KDPM2DiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 4.6934e-07) < 1e-2
            assert abs(result_mean.item() - 6.1112e-10) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 4.693428650170972e-07) < 1e-2
            assert abs(result_mean.item() - 0.0002) < 1e-3

    def test_full_loop_no_noise(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3

    def test_full_loop_device(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if str(torch_device).startswith("cpu"):
            # The following sum varies between 148 and 156 on mps. Why?
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
122
import argparse
import logging
import os
import re

import tensorflow as tf

from transformers import (
    AutoConfig,
    AutoTokenizer,
    DataCollatorForLanguageModeling,
    PushToHubCallback,
    TFAutoModelForMaskedLM,
    create_optimizer,
)


logger = logging.getLogger(__name__)

AUTO = tf.data.AUTOTUNE


def parse_args():
    parser = argparse.ArgumentParser(description="Train a masked language model on TPU.")
    parser.add_argument(
        "--pretrained_model_config",
        type=str,
        default="roberta-base",
        help="The model config to use. Note that we don't copy the model's weights, only the config!",
    )
    parser.add_argument(
        "--tokenizer",
        type=str,
        default="unigram-tokenizer-wikitext",
        help="The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.",
    )
    parser.add_argument(
        "--per_replica_batch_size",
        type=int,
        default=8,
        help="Batch size per TPU core.",
    )
    parser.add_argument(
        "--no_tpu",
        action="store_true",
        help="If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.",
    )
    parser.add_argument(
        "--tpu_name",
        type=str,
        help="Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.",
        default="local",
    )
    parser.add_argument(
        "--tpu_zone",
        type=str,
        help="Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.",
    )
    parser.add_argument(
        "--gcp_project", type=str, help="Google cloud project name. Only used for non-Colab TPU nodes."
    )
    parser.add_argument(
        "--bfloat16",
        action="store_true",
        help="Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.",
    )
    parser.add_argument(
        "--train_dataset",
        type=str,
        help="Path to training dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.",
    )
    parser.add_argument(
        "--shuffle_buffer_size",
        type=int,
        default=2**18,
        help="Size of the shuffle buffer (in samples)",
    )
    parser.add_argument(
        "--eval_dataset",
        type=str,
        help="Path to evaluation dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=1,
        help="Number of epochs to train for.",
    )
    parser.add_argument(
        "--learning_rate",
        type=float,
        default=1e-4,
        help="Learning rate to use for training.",
    )
    parser.add_argument(
        "--weight_decay_rate",
        type=float,
        default=1e-3,
        help="Weight decay rate to use for training.",
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=512,
        help="Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py",
    )
    parser.add_argument(
        "--mlm_probability",
        type=float,
        default=0.15,
        help="Fraction of tokens to mask during training.",
    )
    parser.add_argument("--output_dir", type=str, required=True, help="Path to save model checkpoints to.")
    parser.add_argument("--hub_model_id", type=str, help="Model ID to upload to on the Hugging Face Hub.")

    args = parser.parse_args()
    return args


def initialize_tpu(args):
    try:
        if args.tpu_name:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver(
                args.tpu_name, zone=args.tpu_zone, project=args.gcp_project
            )
        else:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    except ValueError:
        raise RuntimeError(
            "Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or "
            "--gcp_project. When running on a TPU VM, use --tpu_name local."
        )

    tf.config.experimental_connect_to_cluster(tpu)
    tf.tpu.experimental.initialize_tpu_system(tpu)
    return tpu


def count_samples(file_list):
    num_samples = 0
    for file in file_list:
        filename = file.split("/")[-1]
        sample_count = re.search(r"-\d+-(\d+)\.tfrecord", filename).group(1)
        sample_count = int(sample_count)
        num_samples += sample_count
    return num_samples


def prepare_dataset(records, decode_fn, mask_fn, batch_size, shuffle, shuffle_buffer_size=None):
    num_samples = count_samples(records)
    dataset = tf.data.Dataset.from_tensor_slices(records)
    if shuffle:
        dataset = dataset.shuffle(len(records))
    dataset = tf.data.TFRecordDataset(records, num_parallel_reads=AUTO)
    # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
    dataset = dataset.apply(tf.data.experimental.assert_cardinality(num_samples))
    dataset = dataset.map(decode_fn, num_parallel_calls=AUTO)
    if shuffle:
        assert shuffle_buffer_size is not None
        dataset = dataset.shuffle(shuffle_buffer_size)
    dataset = dataset.batch(batch_size, drop_remainder=True)
    dataset = dataset.map(mask_fn, num_parallel_calls=AUTO)
    dataset = dataset.prefetch(AUTO)
    return dataset


def main(args):
    if not args.no_tpu:
        tpu = initialize_tpu(args)
        strategy = tf.distribute.TPUStrategy(tpu)
    else:
        strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")

    if args.bfloat16:
        tf.keras.mixed_precision.set_global_policy("mixed_bfloat16")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer)
    config = AutoConfig.from_pretrained(args.pretrained_model_config)
    config.vocab_size = tokenizer.vocab_size

    training_records = tf.io.gfile.glob(os.path.join(args.train_dataset, "*.tfrecord"))
    if not training_records:
        raise ValueError(f"No .tfrecord files found in {args.train_dataset}.")

    eval_records = tf.io.gfile.glob(os.path.join(args.eval_dataset, "*.tfrecord"))
    if not eval_records:
        raise ValueError(f"No .tfrecord files found in {args.eval_dataset}.")

    num_train_samples = count_samples(training_records)
    steps_per_epoch = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
    total_train_steps = steps_per_epoch * args.num_epochs

    with strategy.scope():
        model = TFAutoModelForMaskedLM.from_config(config)
        model(model.dummy_inputs)  # Pass some dummy inputs through the model to ensure all the weights are built
        optimizer, schedule = create_optimizer(
            num_train_steps=total_train_steps,
            num_warmup_steps=total_train_steps // 20,
            init_lr=args.learning_rate,
            weight_decay_rate=args.weight_decay_rate,
        )

        # Transformers models compute the right loss for their task by default when labels are passed, and will
        # use this for training unless you specify your own loss function in compile().
        model.compile(optimizer=optimizer, metrics=["accuracy"])

    def decode_fn(example):
        features = {
            "input_ids": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
            "attention_mask": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
        }
        return tf.io.parse_single_example(example, features)

    # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
    # use their methods in our data pipeline.
    data_collator = DataCollatorForLanguageModeling(
        tokenizer=tokenizer, mlm_probability=args.mlm_probability, mlm=True, return_tensors="tf"
    )

    def mask_with_collator(batch):
        # TF really needs an isin() function
        special_tokens_mask = (
            ~tf.cast(batch["attention_mask"], tf.bool)
            | (batch["input_ids"] == tokenizer.cls_token_id)
            | (batch["input_ids"] == tokenizer.sep_token_id)
        )
        batch["input_ids"], batch["labels"] = data_collator.tf_mask_tokens(
            batch["input_ids"],
            vocab_size=len(tokenizer),
            mask_token_id=tokenizer.mask_token_id,
            special_tokens_mask=special_tokens_mask,
        )
        return batch

    batch_size = args.per_replica_batch_size * strategy.num_replicas_in_sync

    train_dataset = prepare_dataset(
        training_records,
        decode_fn=decode_fn,
        mask_fn=mask_with_collator,
        batch_size=batch_size,
        shuffle=True,
        shuffle_buffer_size=args.shuffle_buffer_size,
    )

    eval_dataset = prepare_dataset(
        eval_records,
        decode_fn=decode_fn,
        mask_fn=mask_with_collator,
        batch_size=batch_size,
        shuffle=False,
    )

    callbacks = []
    if args.hub_model_id:
        callbacks.append(
            PushToHubCallback(output_dir=args.output_dir, hub_model_id=args.hub_model_id, tokenizer=tokenizer)
        )

    model.fit(
        train_dataset,
        validation_data=eval_dataset,
        epochs=args.num_epochs,
        callbacks=callbacks,
    )

    model.save_pretrained(args.output_dir)


if __name__ == "__main__":
    args = parse_args()
    main(args)
63
0
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {
    "configuration_wav2vec2": ["WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Wav2Vec2Config"],
    "feature_extraction_wav2vec2": ["Wav2Vec2FeatureExtractor"],
    "processing_wav2vec2": ["Wav2Vec2Processor"],
    "tokenization_wav2vec2": ["Wav2Vec2CTCTokenizer", "Wav2Vec2Tokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_wav2vec2"] = [
        "WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Wav2Vec2ForAudioFrameClassification",
        "Wav2Vec2ForCTC",
        "Wav2Vec2ForMaskedLM",
        "Wav2Vec2ForPreTraining",
        "Wav2Vec2ForSequenceClassification",
        "Wav2Vec2ForXVector",
        "Wav2Vec2Model",
        "Wav2Vec2PreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_wav2vec2"] = [
        "TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFWav2Vec2ForCTC",
        "TFWav2Vec2Model",
        "TFWav2Vec2PreTrainedModel",
        "TFWav2Vec2ForSequenceClassification",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_wav2vec2"] = [
        "FlaxWav2Vec2ForCTC",
        "FlaxWav2Vec2ForPreTraining",
        "FlaxWav2Vec2Model",
        "FlaxWav2Vec2PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_wav2vec2 import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, Wav2Vec2Config
    from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
    from .processing_wav2vec2 import Wav2Vec2Processor
    from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer, Wav2Vec2Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_wav2vec2 import (
            WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Wav2Vec2ForAudioFrameClassification,
            Wav2Vec2ForCTC,
            Wav2Vec2ForMaskedLM,
            Wav2Vec2ForPreTraining,
            Wav2Vec2ForSequenceClassification,
            Wav2Vec2ForXVector,
            Wav2Vec2Model,
            Wav2Vec2PreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_wav2vec2 import (
            TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFWav2Vec2ForCTC,
            TFWav2Vec2ForSequenceClassification,
            TFWav2Vec2Model,
            TFWav2Vec2PreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_wav2vec2 import (
            FlaxWav2Vec2ForCTC,
            FlaxWav2Vec2ForPreTraining,
            FlaxWav2Vec2Model,
            FlaxWav2Vec2PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
240
import warnings
from typing import List, Optional, Union

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class LayoutLMv3Processor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv3ImageProcessor"
    tokenizer_class = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True."
            )

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["pixel_values"] = images

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
63
0
"""simple docstring""" from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import torch from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available @dataclass class UpperCamelCase ( lowerCamelCase_ ): UpperCamelCase : Dict = 42 try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipeline_text_to_video_synth import TextToVideoSDPipeline from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401 from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
294
from ....utils import logging


logger = logging.get_logger(__name__)


class MMBTConfig:
    # Wraps a text model config and adds the multimodal-specific attributes.
    def __init__(self, config, num_labels=None, modal_hidden_size=2048):
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
63
0
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_xlnet import XLNetTokenizer
else:
    XLNetTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}

SPIECE_UNDERLINE = "▁"

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4


class XLNetTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"
    slow_tokenizer_class = XLNetTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
68
def solution(n: int = 100) -> int:
    """Return the difference between the square of the sum and the sum of the
    squares of the first n natural numbers (Project Euler problem 6)."""
    sum_of_squares = 0
    sum_of_ints = 0
    for i in range(1, n + 1):
        sum_of_squares += i**2
        sum_of_ints += i
    return sum_of_ints**2 - sum_of_squares


if __name__ == "__main__":
    print(f"{solution() = }")
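# Added sanity check (not in the original): for n = 10 the square of the sum is
# 55**2 = 3025 and the sum of the squares is 385, so the difference is 2640.
assert solution(10) == 2640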
63
0
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional

import datasets
import numpy as np
import tensorflow as tf

from transformers import (
    AutoConfig,
    AutoTokenizer,
    EvalPrediction,
    HfArgumentParser,
    PreTrainedTokenizer,
    TFAutoModelForSequenceClassification,
    TFTrainer,
    TFTrainingArguments,
)
from transformers.utils import logging as hf_logging


hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()


def get_tfds(
    train_file: str,
    eval_file: str,
    test_file: str,
    tokenizer: PreTrainedTokenizer,
    label_column_id: int,
    max_seq_length: Optional[int] = None,
):
    files = {}

    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]

    ds = datasets.load_dataset("csv", data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    label2id = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}

    if len(features_name) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding="max_length"
                ),
                batched=True,
            )
    elif len(features_name) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]),
                    truncation=True,
                    max_length=max_seq_length,
                    padding="max_length",
                ),
                batched=True,
            )

    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )

    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))

    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )

    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))

    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TEST in transformed_ds
        else None
    )

    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))

    return train_ds, val_ds, test_ds, label2id


logger = logging.getLogger(__name__)


@dataclass
class DataTrainingArguments:
    label_column_id: int = field(metadata={"help": "Which column contains the label"})
    train_file: str = field(default=None, metadata={"help": "The path of the training file"})
    dev_file: Optional[str] = field(default=None, metadata={"help": "The path of the development file"})
    test_file: Optional[str] = field(default=None, metadata={"help": "The path of the test file"})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )


@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(
        f"n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, "
        f"16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.

    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file,
        eval_file=data_args.dev_file,
        test_file=data_args.test_file,
        tokenizer=tokenizer,
        label_column_id=data_args.label_column_id,
        max_seq_length=data_args.max_seq_length,
    )

    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=len(label2id),
        label2id=label2id,
        id2label={id: label for label, id in label2id.items()},
        finetuning_task="text-classification",
        cache_dir=model_args.cache_dir,
    )

    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path,
            from_pt=bool(".bin" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": (preds == p.label_ids).mean()}

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")

        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key, value in result.items():
                logger.info(f"  {key} = {value}")
                writer.write(f"{key} = {value}\n")

            results.update(result)

    return results


if __name__ == "__main__":
    main()
157
def is_palindrome(num: int) -> bool:
    """Return True if `num` reads the same forwards and backwards."""
    if num < 0:
        return False

    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10

    return num_copy == rev_num


if __name__ == "__main__":
    import doctest

    doctest.testmod()
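# Added illustrative checks (not in the original): 121 is a palindrome, 123 is not.
assert is_palindrome(121)
assert not is_palindrome(123)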
63
0
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_canine": ["CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP", "CanineConfig"],
    "tokenization_canine": ["CanineTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_canine"] = [
        "CANINE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CanineForMultipleChoice",
        "CanineForQuestionAnswering",
        "CanineForSequenceClassification",
        "CanineForTokenClassification",
        "CanineLayer",
        "CanineModel",
        "CaninePreTrainedModel",
        "load_tf_weights_in_canine",
    ]

if TYPE_CHECKING:
    from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
    from .tokenization_canine import CanineTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_canine import (
            CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
            CanineForMultipleChoice,
            CanineForQuestionAnswering,
            CanineForSequenceClassification,
            CanineForTokenClassification,
            CanineLayer,
            CanineModel,
            CaninePreTrainedModel,
            load_tf_weights_in_canine,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
298
from typing import TYPE_CHECKING

from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable


_import_structure = {"configuration_gpt_neox": ["GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXConfig"]}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_gpt_neox_fast"] = ["GPTNeoXTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_gpt_neox"] = [
        "GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoXForCausalLM",
        "GPTNeoXForQuestionAnswering",
        "GPTNeoXForSequenceClassification",
        "GPTNeoXForTokenClassification",
        "GPTNeoXLayer",
        "GPTNeoXModel",
        "GPTNeoXPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_neox import (
            GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTNeoXForCausalLM,
            GPTNeoXForQuestionAnswering,
            GPTNeoXForSequenceClassification,
            GPTNeoXForTokenClassification,
            GPTNeoXLayer,
            GPTNeoXModel,
            GPTNeoXPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
63
0
import unittest from diffusers.pipelines.pipeline_utils import is_safetensors_compatible class _lowercase ( unittest.TestCase ): '''simple docstring''' def __magic_name__( self :str ) -> int: __SCREAMING_SNAKE_CASE : Any = [ '''safety_checker/pytorch_model.bin''', '''safety_checker/model.safetensors''', '''vae/diffusion_pytorch_model.bin''', '''vae/diffusion_pytorch_model.safetensors''', '''text_encoder/pytorch_model.bin''', '''text_encoder/model.safetensors''', '''unet/diffusion_pytorch_model.bin''', '''unet/diffusion_pytorch_model.safetensors''', ] self.assertTrue(is_safetensors_compatible(__a ) ) def __magic_name__( self :List[str] ) -> Optional[int]: __SCREAMING_SNAKE_CASE : List[str] = [ '''unet/diffusion_pytorch_model.bin''', '''unet/diffusion_pytorch_model.safetensors''', ] self.assertTrue(is_safetensors_compatible(__a ) ) def __magic_name__( self :List[str] ) -> Optional[Any]: __SCREAMING_SNAKE_CASE : int = [ '''safety_checker/pytorch_model.bin''', '''safety_checker/model.safetensors''', '''vae/diffusion_pytorch_model.bin''', '''vae/diffusion_pytorch_model.safetensors''', '''text_encoder/pytorch_model.bin''', '''text_encoder/model.safetensors''', '''unet/diffusion_pytorch_model.bin''', # Removed: 'unet/diffusion_pytorch_model.safetensors', ] self.assertFalse(is_safetensors_compatible(__a ) ) def __magic_name__( self :List[str] ) -> Optional[int]: __SCREAMING_SNAKE_CASE : Dict = [ '''text_encoder/pytorch_model.bin''', '''text_encoder/model.safetensors''', ] self.assertTrue(is_safetensors_compatible(__a ) ) def __magic_name__( self :Optional[Any] ) -> Any: __SCREAMING_SNAKE_CASE : int = [ '''safety_checker/pytorch_model.bin''', '''safety_checker/model.safetensors''', '''vae/diffusion_pytorch_model.bin''', '''vae/diffusion_pytorch_model.safetensors''', '''text_encoder/pytorch_model.bin''', # Removed: 'text_encoder/model.safetensors', '''unet/diffusion_pytorch_model.bin''', '''unet/diffusion_pytorch_model.safetensors''', ] self.assertFalse(is_safetensors_compatible(__a ) ) def __magic_name__( self :str ) -> List[str]: __SCREAMING_SNAKE_CASE : Dict = [ '''safety_checker/pytorch_model.fp16.bin''', '''safety_checker/model.fp16.safetensors''', '''vae/diffusion_pytorch_model.fp16.bin''', '''vae/diffusion_pytorch_model.fp16.safetensors''', '''text_encoder/pytorch_model.fp16.bin''', '''text_encoder/model.fp16.safetensors''', '''unet/diffusion_pytorch_model.fp16.bin''', '''unet/diffusion_pytorch_model.fp16.safetensors''', ] __SCREAMING_SNAKE_CASE : Union[str, Any] = '''fp16''' self.assertTrue(is_safetensors_compatible(__a , variant=__a ) ) def __magic_name__( self :Any ) -> Any: __SCREAMING_SNAKE_CASE : Optional[int] = [ '''unet/diffusion_pytorch_model.fp16.bin''', '''unet/diffusion_pytorch_model.fp16.safetensors''', ] __SCREAMING_SNAKE_CASE : Dict = '''fp16''' self.assertTrue(is_safetensors_compatible(__a , variant=__a ) ) def __magic_name__( self :Any ) -> Union[str, Any]: # pass variant but use the non-variant filenames __SCREAMING_SNAKE_CASE : Dict = [ '''unet/diffusion_pytorch_model.bin''', '''unet/diffusion_pytorch_model.safetensors''', ] __SCREAMING_SNAKE_CASE : int = '''fp16''' self.assertTrue(is_safetensors_compatible(__a , variant=__a ) ) def __magic_name__( self :Optional[Any] ) -> Dict: __SCREAMING_SNAKE_CASE : Union[str, Any] = [ '''safety_checker/pytorch_model.fp16.bin''', '''safety_checker/model.fp16.safetensors''', '''vae/diffusion_pytorch_model.fp16.bin''', '''vae/diffusion_pytorch_model.fp16.safetensors''', '''text_encoder/pytorch_model.fp16.bin''', 
'''text_encoder/model.fp16.safetensors''', '''unet/diffusion_pytorch_model.fp16.bin''', # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors', ] __SCREAMING_SNAKE_CASE : List[Any] = '''fp16''' self.assertFalse(is_safetensors_compatible(__a , variant=__a ) ) def __magic_name__( self :Dict ) -> Tuple: __SCREAMING_SNAKE_CASE : List[str] = [ '''text_encoder/pytorch_model.fp16.bin''', '''text_encoder/model.fp16.safetensors''', ] __SCREAMING_SNAKE_CASE : List[str] = '''fp16''' self.assertTrue(is_safetensors_compatible(__a , variant=__a ) ) def __magic_name__( self :List[str] ) -> Dict: # pass variant but use the non-variant filenames __SCREAMING_SNAKE_CASE : List[str] = [ '''text_encoder/pytorch_model.bin''', '''text_encoder/model.safetensors''', ] __SCREAMING_SNAKE_CASE : str = '''fp16''' self.assertTrue(is_safetensors_compatible(__a , variant=__a ) ) def __magic_name__( self :Optional[int] ) -> Optional[int]: __SCREAMING_SNAKE_CASE : str = [ '''safety_checker/pytorch_model.fp16.bin''', '''safety_checker/model.fp16.safetensors''', '''vae/diffusion_pytorch_model.fp16.bin''', '''vae/diffusion_pytorch_model.fp16.safetensors''', '''text_encoder/pytorch_model.fp16.bin''', # 'text_encoder/model.fp16.safetensors', '''unet/diffusion_pytorch_model.fp16.bin''', '''unet/diffusion_pytorch_model.fp16.safetensors''', ] __SCREAMING_SNAKE_CASE : Union[str, Any] = '''fp16''' self.assertFalse(is_safetensors_compatible(__a , variant=__a ) )
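For reference, a minimal usage sketch of the helper these tests exercise, using the same import path and `variant` keyword the tests themselves rely on; the filename list is taken from the cases above:

from diffusers.pipelines.pipeline_utils import is_safetensors_compatible

# Every PyTorch weight file must have a .safetensors counterpart
# (optionally carrying a variant infix such as `fp16`) for the repo
# to count as safetensors-compatible.
filenames = [
    "unet/diffusion_pytorch_model.fp16.bin",
    "unet/diffusion_pytorch_model.fp16.safetensors",
]
print(is_safetensors_compatible(filenames, variant="fp16"))  # True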
'''simple docstring''' import json import sys import tempfile import unittest from pathlib import Path import transformers from transformers import ( CONFIG_MAPPING, FEATURE_EXTRACTOR_MAPPING, AutoConfig, AutoFeatureExtractor, WavaVecaConfig, WavaVecaFeatureExtractor, ) from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils')) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402 lowerCAmelCase_ : Any = get_tests_dir('fixtures') lowerCAmelCase_ : Union[str, Any] = get_tests_dir('fixtures/dummy_feature_extractor_config.json') lowerCAmelCase_ : Dict = get_tests_dir('fixtures/dummy-config.json') class __SCREAMING_SNAKE_CASE (unittest.TestCase ): """simple docstring""" def UpperCamelCase__ ( self : Optional[int] ): _a = 0 def UpperCamelCase__ ( self : str ): _a = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h" ) self.assertIsInstance(__a , __a ) def UpperCamelCase__ ( self : Tuple ): _a = AutoFeatureExtractor.from_pretrained(__a ) self.assertIsInstance(__a , __a ) def UpperCamelCase__ ( self : List[Any] ): with tempfile.TemporaryDirectory() as tmpdirname: _a = WavaVecaConfig() # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally _a = AutoFeatureExtractor.from_pretrained(__a ).to_dict() config_dict.pop("feature_extractor_type" ) _a = WavaVecaFeatureExtractor(**__a ) # save in new folder model_config.save_pretrained(__a ) config.save_pretrained(__a ) _a = AutoFeatureExtractor.from_pretrained(__a ) # make sure private variable is not incorrectly saved _a = json.loads(config.to_json_string() ) self.assertTrue("_processor_class" not in dict_as_saved ) self.assertIsInstance(__a , __a ) def UpperCamelCase__ ( self : Tuple ): _a = AutoFeatureExtractor.from_pretrained(__a ) self.assertIsInstance(__a , __a ) def UpperCamelCase__ ( self : Union[str, Any] ): with self.assertRaisesRegex( __a , "bert-base is not a local folder and is not a valid model identifier" ): _a = AutoFeatureExtractor.from_pretrained("bert-base" ) def UpperCamelCase__ ( self : Optional[Any] ): with self.assertRaisesRegex( __a , r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ): _a = AutoFeatureExtractor.from_pretrained(__a , revision="aaaaaa" ) def UpperCamelCase__ ( self : List[Any] ): with self.assertRaisesRegex( __a , "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json." , ): _a = AutoFeatureExtractor.from_pretrained("hf-internal-testing/config-no-model" ) def UpperCamelCase__ ( self : List[Any] ): # If remote code is not set, we will time out when asking whether to load the model. with self.assertRaises(__a ): _a = AutoFeatureExtractor.from_pretrained( "hf-internal-testing/test_dynamic_feature_extractor" ) # If remote code is disabled, we can't load this config. with self.assertRaises(__a ): _a = AutoFeatureExtractor.from_pretrained( "hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=__a ) _a = AutoFeatureExtractor.from_pretrained( "hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=__a ) self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" ) # Test feature extractor can be reloaded. 
with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained(__a ) _a = AutoFeatureExtractor.from_pretrained(__a , trust_remote_code=__a ) self.assertEqual(reloaded_feature_extractor.__class__.__name__ , "NewFeatureExtractor" ) def UpperCamelCase__ ( self : Any ): try: AutoConfig.register("custom" , __a ) AutoFeatureExtractor.register(__a , __a ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(__a ): AutoFeatureExtractor.register(__a , __a ) # Now that the config is registered, it can be used as any other config with the auto-API _a = CustomFeatureExtractor.from_pretrained(__a ) with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained(__a ) _a = AutoFeatureExtractor.from_pretrained(__a ) self.assertIsInstance(__a , __a ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig] def UpperCamelCase__ ( self : Tuple ): class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ): """simple docstring""" __a =True try: AutoConfig.register("custom" , __a ) AutoFeatureExtractor.register(__a , __a ) # If remote code is not set, the default is to use local _a = AutoFeatureExtractor.from_pretrained( "hf-internal-testing/test_dynamic_feature_extractor" ) self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" ) self.assertTrue(feature_extractor.is_local ) # If remote code is disabled, we load the local one. _a = AutoFeatureExtractor.from_pretrained( "hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=__a ) self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" ) self.assertTrue(feature_extractor.is_local ) # If remote is enabled, we load from the Hub _a = AutoFeatureExtractor.from_pretrained( "hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=__a ) self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" ) self.assertTrue(not hasattr(__a , "is_local" ) ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
"""simple docstring""" from math import sqrt def __A (_SCREAMING_SNAKE_CASE ) ->bool: """simple docstring""" assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and ( number >= 0 ), "'number' must been an int and positive" lowerCAmelCase__ :Tuple = True # 0 and 1 are none primes. if number <= 1: lowerCAmelCase__ :str = False for divisor in range(2 , int(round(sqrt(_SCREAMING_SNAKE_CASE ) ) ) + 1 ): # if 'number' divisible by 'divisor' then sets 'status' # of false and break up the loop. if number % divisor == 0: lowerCAmelCase__ :Tuple = False break # precondition assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ), "'status' must been from type bool" return status def __A (_SCREAMING_SNAKE_CASE ) ->Dict: """simple docstring""" assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and (n > 2), "'N' must been an int and > 2" # beginList: contains all natural numbers from 2 up to N lowerCAmelCase__ :Optional[int] = list(range(2 , n + 1 ) ) lowerCAmelCase__ :List[Any] = [] # this list will be returns. # actual sieve of erathostenes for i in range(len(_SCREAMING_SNAKE_CASE ) ): for j in range(i + 1 , len(_SCREAMING_SNAKE_CASE ) ): if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0): lowerCAmelCase__ :str = 0 # filters actual prime numbers. lowerCAmelCase__ :Union[str, Any] = [x for x in begin_list if x != 0] # precondition assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ), "'ans' must been from type list" return ans def __A (_SCREAMING_SNAKE_CASE ) ->int: """simple docstring""" assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and (n > 2), "'N' must been an int and > 2" lowerCAmelCase__ :Dict = [] # iterates over all numbers between 2 up to N+1 # if a number is prime then appends to list 'ans' for number in range(2 , n + 1 ): if is_prime(_SCREAMING_SNAKE_CASE ): ans.append(_SCREAMING_SNAKE_CASE ) # precondition assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ), "'ans' must been from type list" return ans def __A (_SCREAMING_SNAKE_CASE ) ->List[Any]: """simple docstring""" assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and number >= 0, "'number' must been an int and >= 0" lowerCAmelCase__ :List[str] = [] # this list will be returns of the function. # potential prime number factors. 
lowerCAmelCase__ :Optional[Any] = 2 lowerCAmelCase__ :Any = number if number == 0 or number == 1: ans.append(_SCREAMING_SNAKE_CASE ) # if 'number' not prime then builds the prime factorization of 'number' elif not is_prime(_SCREAMING_SNAKE_CASE ): while quotient != 1: if is_prime(_SCREAMING_SNAKE_CASE ) and (quotient % factor == 0): ans.append(_SCREAMING_SNAKE_CASE ) quotient /= factor else: factor += 1 else: ans.append(_SCREAMING_SNAKE_CASE ) # precondition assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ), "'ans' must been from type list" return ans def __A (_SCREAMING_SNAKE_CASE ) ->List[Any]: """simple docstring""" assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and ( number >= 0 ), "'number' bust been an int and >= 0" lowerCAmelCase__ :Any = 0 # prime factorization of 'number' lowerCAmelCase__ :Dict = prime_factorization(_SCREAMING_SNAKE_CASE ) lowerCAmelCase__ :Tuple = max(_SCREAMING_SNAKE_CASE ) # precondition assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ), "'ans' must been from type int" return ans def __A (_SCREAMING_SNAKE_CASE ) ->List[Any]: """simple docstring""" assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and ( number >= 0 ), "'number' bust been an int and >= 0" lowerCAmelCase__ :int = 0 # prime factorization of 'number' lowerCAmelCase__ :Optional[int] = prime_factorization(_SCREAMING_SNAKE_CASE ) lowerCAmelCase__ :List[Any] = min(_SCREAMING_SNAKE_CASE ) # precondition assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ), "'ans' must been from type int" return ans def __A (_SCREAMING_SNAKE_CASE ) ->Dict: """simple docstring""" assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ), "'number' must been an int" assert isinstance(number % 2 == 0 , _SCREAMING_SNAKE_CASE ), "compare bust been from type bool" return number % 2 == 0 def __A (_SCREAMING_SNAKE_CASE ) ->Tuple: """simple docstring""" assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ), "'number' must been an int" assert isinstance(number % 2 != 0 , _SCREAMING_SNAKE_CASE ), "compare bust been from type bool" return number % 2 != 0 def __A (_SCREAMING_SNAKE_CASE ) ->Optional[int]: """simple docstring""" assert ( isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and (number > 2) and is_even(_SCREAMING_SNAKE_CASE ) ), "'number' must been an int, even and > 2" lowerCAmelCase__ :List[str] = [] # this list will returned # creates a list of prime numbers between 2 up to 'number' lowerCAmelCase__ :Optional[int] = get_prime_numbers(_SCREAMING_SNAKE_CASE ) lowerCAmelCase__ :Any = len(_SCREAMING_SNAKE_CASE ) # run variable for while-loops. lowerCAmelCase__ :Tuple = 0 lowerCAmelCase__ :Optional[Any] = None # exit variable. for break up the loops lowerCAmelCase__ :int = True while i < len_pn and loop: lowerCAmelCase__ :Optional[int] = i + 1 while j < len_pn and loop: if prime_numbers[i] + prime_numbers[j] == number: lowerCAmelCase__ :Union[str, Any] = False ans.append(prime_numbers[i] ) ans.append(prime_numbers[j] ) j += 1 i += 1 # precondition assert ( isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and (len(_SCREAMING_SNAKE_CASE ) == 2) and (ans[0] + ans[1] == number) and is_prime(ans[0] ) and is_prime(ans[1] ) ), "'ans' must contains two primes. 
And sum of elements must been eq 'number'" return ans def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->int: """simple docstring""" assert ( isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and (numbera >= 0) and (numbera >= 0) ), "'number1' and 'number2' must been positive integer." lowerCAmelCase__ :Dict = 0 while numbera != 0: lowerCAmelCase__ :Dict = numbera % numbera lowerCAmelCase__ :Optional[int] = numbera lowerCAmelCase__ :Optional[Any] = rest # precondition assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and ( numbera >= 0 ), "'number' must been from type int and positive" return numbera def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->List[str]: """simple docstring""" assert ( isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and (numbera >= 1) and (numbera >= 1) ), "'number1' and 'number2' must been positive integer." lowerCAmelCase__ :Union[str, Any] = 1 # actual answer that will be return. # for kgV (x,1) if numbera > 1 and numbera > 1: # builds the prime factorization of 'number1' and 'number2' lowerCAmelCase__ :List[Any] = prime_factorization(_SCREAMING_SNAKE_CASE ) lowerCAmelCase__ :str = prime_factorization(_SCREAMING_SNAKE_CASE ) elif numbera == 1 or numbera == 1: lowerCAmelCase__ :List[str] = [] lowerCAmelCase__ :Optional[int] = [] lowerCAmelCase__ :List[Any] = max(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) lowerCAmelCase__ :List[Any] = 0 lowerCAmelCase__ :Tuple = 0 lowerCAmelCase__ :Optional[int] = [] # captured numbers int both 'primeFac1' and 'primeFac2' # iterates through primeFac1 for n in prime_fac_a: if n not in done: if n in prime_fac_a: lowerCAmelCase__ :Optional[int] = prime_fac_a.count(_SCREAMING_SNAKE_CASE ) lowerCAmelCase__ :int = prime_fac_a.count(_SCREAMING_SNAKE_CASE ) for _ in range(max(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ): ans *= n else: lowerCAmelCase__ :List[str] = prime_fac_a.count(_SCREAMING_SNAKE_CASE ) for _ in range(_SCREAMING_SNAKE_CASE ): ans *= n done.append(_SCREAMING_SNAKE_CASE ) # iterates through primeFac2 for n in prime_fac_a: if n not in done: lowerCAmelCase__ :Tuple = prime_fac_a.count(_SCREAMING_SNAKE_CASE ) for _ in range(_SCREAMING_SNAKE_CASE ): ans *= n done.append(_SCREAMING_SNAKE_CASE ) # precondition assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and ( ans >= 0 ), "'ans' must been from type int and positive" return ans def __A (_SCREAMING_SNAKE_CASE ) ->Optional[int]: """simple docstring""" assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and (n >= 0), "'number' must been a positive int" lowerCAmelCase__ :Optional[Any] = 0 lowerCAmelCase__ :Tuple = 2 # this variable holds the answer while index < n: index += 1 ans += 1 # counts to the next number # if ans not prime then # runs to the next prime number. 
while not is_prime(_SCREAMING_SNAKE_CASE ): ans += 1 # precondition assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and is_prime( _SCREAMING_SNAKE_CASE ), "'ans' must been a prime number and from type int" return ans def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->str: """simple docstring""" assert ( is_prime(_SCREAMING_SNAKE_CASE ) and is_prime(_SCREAMING_SNAKE_CASE ) and (p_number_a < p_number_a) ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'" lowerCAmelCase__ :Tuple = p_number_a + 1 # jump to the next number lowerCAmelCase__ :Any = [] # this list will be returns. # if number is not prime then # fetch the next prime number. while not is_prime(_SCREAMING_SNAKE_CASE ): number += 1 while number < p_number_a: ans.append(_SCREAMING_SNAKE_CASE ) number += 1 # fetch the next prime number. while not is_prime(_SCREAMING_SNAKE_CASE ): number += 1 # precondition assert ( isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and ans[0] != p_number_a and ans[len(_SCREAMING_SNAKE_CASE ) - 1] != p_number_a ), "'ans' must been a list without the arguments" # 'ans' contains not 'pNumber1' and 'pNumber2' ! return ans def __A (_SCREAMING_SNAKE_CASE ) ->Dict: """simple docstring""" assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and (n >= 1), "'n' must been int and >= 1" lowerCAmelCase__ :int = [] # will be returned. for divisor in range(1 , n + 1 ): if n % divisor == 0: ans.append(_SCREAMING_SNAKE_CASE ) # precondition assert ans[0] == 1 and ans[len(_SCREAMING_SNAKE_CASE ) - 1] == n, "Error in function getDivisiors(...)" return ans def __A (_SCREAMING_SNAKE_CASE ) ->Optional[Any]: """simple docstring""" assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and ( number > 1 ), "'number' must been an int and >= 1" lowerCAmelCase__ :List[Any] = get_divisors(_SCREAMING_SNAKE_CASE ) # precondition assert ( isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and (divisors[0] == 1) and (divisors[len(_SCREAMING_SNAKE_CASE ) - 1] == number) ), "Error in help-function getDivisiors(...)" # summed all divisors up to 'number' (exclusive), hence [:-1] return sum(divisors[:-1] ) == number def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->int: """simple docstring""" assert ( isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and (denominator != 0) ), "The arguments must been from type int and 'denominator' != 0" # build the greatest common divisor of numerator and denominator. lowerCAmelCase__ :Any = gcd(abs(_SCREAMING_SNAKE_CASE ) , abs(_SCREAMING_SNAKE_CASE ) ) # precondition assert ( isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and (numerator % gcd_of_fraction == 0) and (denominator % gcd_of_fraction == 0) ), "Error in function gcd(...,...)" return (numerator // gcd_of_fraction, denominator // gcd_of_fraction) def __A (_SCREAMING_SNAKE_CASE ) ->Any: """simple docstring""" assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and (n >= 0), "'n' must been a int and >= 0" lowerCAmelCase__ :str = 1 # this will be return. 
for factor in range(1 , n + 1 ): ans *= factor return ans def __A (_SCREAMING_SNAKE_CASE ) ->int: """simple docstring""" assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and (n >= 0), "'n' must been an int and >= 0" lowerCAmelCase__ :int = 0 lowerCAmelCase__ :Any = 1 lowerCAmelCase__ :Tuple = 1 # this will be return for _ in range(n - 1 ): lowerCAmelCase__ :Any = ans ans += fiba lowerCAmelCase__ :int = tmp return ans
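The functions in this module are not callable as written (every definition is named `__A`), but the internal call sites still reference names like `is_prime`, `prime_factorization`, and `gcd`. Assuming those recovered names (hypothetical here), the intended behavior looks like:

# Hypothetical names recovered from the module's own call sites.
print(is_prime(97))              # True
print(prime_factorization(360))  # [2, 2, 2, 3, 3, 5]
print(gcd(54, 24))               # 6
print(goldbach(28))              # [5, 23], two primes summing to 28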
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase_ : Dict = logging.get_logger(__name__) lowerCAmelCase_ : int = { 'bigcode/gpt_bigcode-santacoder': 'https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json', } class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ): """simple docstring""" __a ='gpt_bigcode' __a =['past_key_values'] __a ={ 'hidden_size': 'n_embd', 'max_position_embeddings': 'n_positions', 'num_attention_heads': 'n_head', 'num_hidden_layers': 'n_layer', } def __init__( self : Optional[Any] , __a : Tuple=5_02_57 , __a : str=10_24 , __a : Dict=7_68 , __a : Tuple=12 , __a : str=12 , __a : Optional[int]=None , __a : Dict="gelu_pytorch_tanh" , __a : Tuple=0.1 , __a : Tuple=0.1 , __a : Union[str, Any]=0.1 , __a : Tuple=1e-5 , __a : str=0.02 , __a : Dict=True , __a : Union[str, Any]=True , __a : Optional[int]=5_02_56 , __a : Optional[int]=5_02_56 , __a : Union[str, Any]=True , __a : Dict=True , __a : Union[str, Any]=True , **__a : List[Any] , ): _a = vocab_size _a = n_positions _a = n_embd _a = n_layer _a = n_head _a = n_inner _a = activation_function _a = resid_pdrop _a = embd_pdrop _a = attn_pdrop _a = layer_norm_epsilon _a = initializer_range _a = scale_attn_weights _a = use_cache _a = attention_softmax_in_fpaa _a = scale_attention_softmax_in_fpaa _a = multi_query _a = bos_token_id _a = eos_token_id super().__init__(bos_token_id=__a , eos_token_id=__a , **__a )
def solution(pence: int = 200) -> int:
    """Count the ways to make `pence` from the standard UK coin
    denominations, using bottom-up dynamic programming."""
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence
    for coin in coins:
        for i in range(coin, pence + 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]


if __name__ == "__main__":
    assert solution(200) == 73682
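Iterating coins in the outer loop counts each combination exactly once, regardless of coin order. A small hand-checkable case:

# The 4 ways to make 5 pence are 5, 2+2+1, 2+1+1+1, and 1+1+1+1+1;
# denominations larger than 5 contribute nothing here.
assert solution(5) == 4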
def perfect_cube(n: int) -> bool:
    """Return True if `n` is a perfect cube."""
    # Round the float cube root before cubing: comparing the raw float
    # product to `n` fails even for exact cubes, e.g. 27 ** (1 / 3)
    # evaluates to 3.0000000000000004.
    val = round(n ** (1 / 3))
    return val * val * val == n


if __name__ == "__main__":
    print(perfect_cube(27))
    print(perfect_cube(4))
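Rounding handles typical inputs, but very large integers can lose precision in the float cube root. An exact, float-free variant (a sketch, not part of the original module) uses integer binary search:

def perfect_cube_binary_search(n: int) -> bool:
    """Exact perfect-cube test via integer binary search (no floats)."""
    if n < 0:
        n = -n  # n is a cube iff |n| is
    low, high = 0, n
    while low <= high:
        mid = (low + high) // 2
        cube = mid * mid * mid
        if cube == n:
            return True
        if cube < n:
            low = mid + 1
        else:
            high = mid - 1
    return False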
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available _lowercase: List[Any] = {'configuration_swin': ['SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'SwinConfig', 'SwinOnnxConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase: Union[str, Any] = [ 'SWIN_PRETRAINED_MODEL_ARCHIVE_LIST', 'SwinForImageClassification', 'SwinForMaskedImageModeling', 'SwinModel', 'SwinPreTrainedModel', 'SwinBackbone', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase: Union[str, Any] = [ 'TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFSwinForImageClassification', 'TFSwinForMaskedImageModeling', 'TFSwinModel', 'TFSwinPreTrainedModel', ] if TYPE_CHECKING: from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_swin import ( SWIN_PRETRAINED_MODEL_ARCHIVE_LIST, SwinBackbone, SwinForImageClassification, SwinForMaskedImageModeling, SwinModel, SwinPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_swin import ( TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST, TFSwinForImageClassification, TFSwinForMaskedImageModeling, TFSwinModel, TFSwinPreTrainedModel, ) else: import sys _lowercase: Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring''' import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING lowerCAmelCase_ : Dict = logging.get_logger(__name__) lowerCAmelCase_ : Optional[int] = { 'ut/deta': 'https://huggingface.co/ut/deta/resolve/main/config.json', } class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ): """simple docstring""" __a ='deta' __a ={ 'hidden_size': 'd_model', 'num_attention_heads': 'encoder_attention_heads', } def __init__( self : List[str] , __a : List[str]=None , __a : Dict=9_00 , __a : str=20_48 , __a : Tuple=6 , __a : List[str]=20_48 , __a : str=8 , __a : Union[str, Any]=6 , __a : int=10_24 , __a : List[Any]=8 , __a : Dict=0.0 , __a : Tuple=True , __a : Optional[Any]="relu" , __a : Tuple=2_56 , __a : Optional[Any]=0.1 , __a : int=0.0 , __a : List[Any]=0.0 , __a : Optional[int]=0.02 , __a : str=1.0 , __a : Dict=True , __a : Dict=False , __a : Optional[int]="sine" , __a : Any=5 , __a : List[str]=4 , __a : Optional[int]=4 , __a : List[str]=True , __a : str=3_00 , __a : int=True , __a : int=True , __a : Tuple=1 , __a : Optional[int]=5 , __a : Tuple=2 , __a : Dict=1 , __a : Optional[int]=1 , __a : Any=5 , __a : Optional[int]=2 , __a : Dict=0.1 , __a : str=0.25 , **__a : Tuple , ): if backbone_config is None: logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." ) _a = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"] ) else: if isinstance(__a , __a ): _a = backbone_config.pop("model_type" ) _a = CONFIG_MAPPING[backbone_model_type] _a = config_class.from_dict(__a ) _a = backbone_config _a = num_queries _a = max_position_embeddings _a = d_model _a = encoder_ffn_dim _a = encoder_layers _a = encoder_attention_heads _a = decoder_ffn_dim _a = decoder_layers _a = decoder_attention_heads _a = dropout _a = attention_dropout _a = activation_dropout _a = activation_function _a = init_std _a = init_xavier_std _a = encoder_layerdrop _a = auxiliary_loss _a = position_embedding_type # deformable attributes _a = num_feature_levels _a = encoder_n_points _a = decoder_n_points _a = two_stage _a = two_stage_num_proposals _a = with_box_refine _a = assign_first_stage if two_stage is True and with_box_refine is False: raise ValueError("If two_stage is True, with_box_refine must be True." ) # Hungarian matcher _a = class_cost _a = bbox_cost _a = giou_cost # Loss coefficients _a = mask_loss_coefficient _a = dice_loss_coefficient _a = bbox_loss_coefficient _a = giou_loss_coefficient _a = eos_coefficient _a = focal_alpha super().__init__(is_encoder_decoder=__a , **__a ) @property def UpperCamelCase__ ( self : Optional[Any] ): return self.encoder_attention_heads @property def UpperCamelCase__ ( self : Dict ): return self.d_model def UpperCamelCase__ ( self : List[str] ): _a = copy.deepcopy(self.__dict__ ) _a = self.backbone_config.to_dict() _a = self.__class__.model_type return output
'''simple docstring''' from dataclasses import dataclass, field from typing import Tuple from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends from .benchmark_args_utils import BenchmarkArguments if is_torch_available(): import torch if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm __lowerCamelCase = logging.get_logger(__name__) @dataclass class A__ ( lowerCamelCase_ ): lowercase = [ "no_inference", "no_cuda", "no_tpu", "no_speed", "no_memory", "no_env_print", "no_multi_process", ] def __init__( self , **UpperCamelCase__ ) -> Any: '''simple docstring''' for deprecated_arg in self.deprecated_args: if deprecated_arg in kwargs: A_ = deprecated_arg[3:] setattr(self , __a , not kwargs.pop(__a ) ) logger.warning( f'''{deprecated_arg} is depreciated. Please use --no_{positive_arg} or''' f''' {positive_arg}={kwargs[positive_arg]}''' ) A_ = kwargs.pop("""torchscript""" , self.torchscript ) A_ = kwargs.pop("""torch_xla_tpu_print_metrics""" , self.torch_xla_tpu_print_metrics ) A_ = kwargs.pop("""fp16_opt_level""" , self.fpaa_opt_level ) super().__init__(**__a ) lowercase = field(default=lowerCamelCase_ , metadata={"help": "Trace the models using torchscript"} ) lowercase = field(default=lowerCamelCase_ , metadata={"help": "Print Xla/PyTorch tpu metrics"} ) lowercase = field( default="O1" , metadata={ "help": ( "For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\']. " "See details at https://nvidia.github.io/apex/amp.html" ) } , ) @cached_property def snake_case_ ( self ) -> Optional[Any]: '''simple docstring''' requires_backends(self , ["""torch"""] ) logger.info("""PyTorch: setting up devices""" ) if not self.cuda: A_ = torch.device("""cpu""" ) A_ = 0 elif is_torch_tpu_available(): A_ = xm.xla_device() A_ = 0 else: A_ = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" ) A_ = torch.cuda.device_count() return device, n_gpu @property def snake_case_ ( self ) -> Any: '''simple docstring''' return is_torch_tpu_available() and self.tpu @property def snake_case_ ( self ) -> int: '''simple docstring''' requires_backends(self , ["""torch"""] ) # TODO(PVP): currently only single GPU is supported return torch.cuda.current_device() @property def snake_case_ ( self ) -> List[Any]: '''simple docstring''' requires_backends(self , ["""torch"""] ) return self._setup_devices[0] @property def snake_case_ ( self ) -> Optional[Any]: '''simple docstring''' requires_backends(self , ["""torch"""] ) return self._setup_devices[1] @property def snake_case_ ( self ) -> List[str]: '''simple docstring''' return self.n_gpu > 0
'''simple docstring''' import fire from torch.utils.data import DataLoader from tqdm import tqdm from transformers import AutoTokenizer from utils import SeqaSeqDataset, pickle_save def _lowerCamelCase ( lowercase : Union[str, Any] , lowercase : int , lowercase : int=1024 , lowercase : int=1024 , lowercase : Tuple=False , **lowercase : Optional[int] ) -> Union[str, Any]: _a = AutoTokenizer.from_pretrained(lowercase ) _a = SeqaSeqDataset(lowercase , lowercase , lowercase , lowercase , type_path="train" , **lowercase ) _a = tok.pad_token_id def get_lens(lowercase : Optional[int] ): _a = tqdm( DataLoader(lowercase , batch_size=512 , num_workers=8 , shuffle=lowercase , collate_fn=ds.collate_fn ) , desc=str(ds.len_file ) , ) _a = [] for batch in dl: _a = batch["input_ids"].ne(lowercase ).sum(1 ).tolist() _a = batch["labels"].ne(lowercase ).sum(1 ).tolist() if consider_target: for src, tgt in zip(lowercase , lowercase ): max_lens.append(max(lowercase , lowercase ) ) else: max_lens.extend(lowercase ) return max_lens _a = get_lens(lowercase ) _a = SeqaSeqDataset(lowercase , lowercase , lowercase , lowercase , type_path="val" , **lowercase ) _a = get_lens(lowercase ) pickle_save(lowercase , train_ds.len_file ) pickle_save(lowercase , val_ds.len_file ) if __name__ == "__main__": fire.Fire(save_len_file)
import math


def sieve(n: int) -> list[int]:
    """Segmented sieve of Eratosthenes: return all primes <= n."""
    prime = []
    start = 2
    end = int(math.sqrt(n))  # size of every segment

    # Sieve the first segment [2, sqrt(n)] directly.
    temp = [True] * (end + 1)
    in_prime = []
    while start <= end:
        if temp[start] is True:
            in_prime.append(start)
            for i in range(start * start, end + 1, start):
                temp[i] = False
        start += 1
    prime += in_prime

    # Sieve each remaining segment with the primes found so far.
    low = end + 1
    high = min(2 * end, n)
    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            # First multiple of `each` inside [low, high].
            t = math.floor(low / each) * each
            if t < low:
                t += each
            for j in range(t, high + 1, each):
                temp[j - low] = False
        for j in range(len(temp)):
            if temp[j] is True:
                prime.append(j + low)
        low = high + 1
        high = min(high + end, n)
    return prime


print(sieve(10**6))
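Two quick sanity checks for `sieve`:

assert sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
assert len(sieve(100)) == 25  # there are 25 primes up to 100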
'''simple docstring''' import unittest from diffusers.pipelines.pipeline_utils import is_safetensors_compatible class __SCREAMING_SNAKE_CASE (unittest.TestCase ): """simple docstring""" def UpperCamelCase__ ( self : str ): _a = [ "safety_checker/pytorch_model.bin", "safety_checker/model.safetensors", "vae/diffusion_pytorch_model.bin", "vae/diffusion_pytorch_model.safetensors", "text_encoder/pytorch_model.bin", "text_encoder/model.safetensors", "unet/diffusion_pytorch_model.bin", "unet/diffusion_pytorch_model.safetensors", ] self.assertTrue(is_safetensors_compatible(__a ) ) def UpperCamelCase__ ( self : List[str] ): _a = [ "unet/diffusion_pytorch_model.bin", "unet/diffusion_pytorch_model.safetensors", ] self.assertTrue(is_safetensors_compatible(__a ) ) def UpperCamelCase__ ( self : List[str] ): _a = [ "safety_checker/pytorch_model.bin", "safety_checker/model.safetensors", "vae/diffusion_pytorch_model.bin", "vae/diffusion_pytorch_model.safetensors", "text_encoder/pytorch_model.bin", "text_encoder/model.safetensors", "unet/diffusion_pytorch_model.bin", # Removed: 'unet/diffusion_pytorch_model.safetensors', ] self.assertFalse(is_safetensors_compatible(__a ) ) def UpperCamelCase__ ( self : List[str] ): _a = [ "text_encoder/pytorch_model.bin", "text_encoder/model.safetensors", ] self.assertTrue(is_safetensors_compatible(__a ) ) def UpperCamelCase__ ( self : Optional[Any] ): _a = [ "safety_checker/pytorch_model.bin", "safety_checker/model.safetensors", "vae/diffusion_pytorch_model.bin", "vae/diffusion_pytorch_model.safetensors", "text_encoder/pytorch_model.bin", # Removed: 'text_encoder/model.safetensors', "unet/diffusion_pytorch_model.bin", "unet/diffusion_pytorch_model.safetensors", ] self.assertFalse(is_safetensors_compatible(__a ) ) def UpperCamelCase__ ( self : str ): _a = [ "safety_checker/pytorch_model.fp16.bin", "safety_checker/model.fp16.safetensors", "vae/diffusion_pytorch_model.fp16.bin", "vae/diffusion_pytorch_model.fp16.safetensors", "text_encoder/pytorch_model.fp16.bin", "text_encoder/model.fp16.safetensors", "unet/diffusion_pytorch_model.fp16.bin", "unet/diffusion_pytorch_model.fp16.safetensors", ] _a = "fp16" self.assertTrue(is_safetensors_compatible(__a , variant=__a ) ) def UpperCamelCase__ ( self : Any ): _a = [ "unet/diffusion_pytorch_model.fp16.bin", "unet/diffusion_pytorch_model.fp16.safetensors", ] _a = "fp16" self.assertTrue(is_safetensors_compatible(__a , variant=__a ) ) def UpperCamelCase__ ( self : Any ): # pass variant but use the non-variant filenames _a = [ "unet/diffusion_pytorch_model.bin", "unet/diffusion_pytorch_model.safetensors", ] _a = "fp16" self.assertTrue(is_safetensors_compatible(__a , variant=__a ) ) def UpperCamelCase__ ( self : Optional[Any] ): _a = [ "safety_checker/pytorch_model.fp16.bin", "safety_checker/model.fp16.safetensors", "vae/diffusion_pytorch_model.fp16.bin", "vae/diffusion_pytorch_model.fp16.safetensors", "text_encoder/pytorch_model.fp16.bin", "text_encoder/model.fp16.safetensors", "unet/diffusion_pytorch_model.fp16.bin", # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors', ] _a = "fp16" self.assertFalse(is_safetensors_compatible(__a , variant=__a ) ) def UpperCamelCase__ ( self : Dict ): _a = [ "text_encoder/pytorch_model.fp16.bin", "text_encoder/model.fp16.safetensors", ] _a = "fp16" self.assertTrue(is_safetensors_compatible(__a , variant=__a ) ) def UpperCamelCase__ ( self : List[str] ): # pass variant but use the non-variant filenames _a = [ "text_encoder/pytorch_model.bin", "text_encoder/model.safetensors", ] _a = "fp16" 
self.assertTrue(is_safetensors_compatible(__a , variant=__a ) ) def UpperCamelCase__ ( self : Optional[int] ): _a = [ "safety_checker/pytorch_model.fp16.bin", "safety_checker/model.fp16.safetensors", "vae/diffusion_pytorch_model.fp16.bin", "vae/diffusion_pytorch_model.fp16.safetensors", "text_encoder/pytorch_model.fp16.bin", # 'text_encoder/model.fp16.safetensors', "unet/diffusion_pytorch_model.fp16.bin", "unet/diffusion_pytorch_model.fp16.safetensors", ] _a = "fp16" self.assertFalse(is_safetensors_compatible(__a , variant=__a ) )
import numpy as np


def sigmoid(vector: np.ndarray) -> np.ndarray:
    """Element-wise logistic sigmoid."""
    return 1 / (1 + np.exp(-vector))


def sigmoid_linear_unit(vector: np.ndarray) -> np.ndarray:
    """SiLU (swish) activation: x * sigmoid(x)."""
    return vector * sigmoid(vector)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
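A quick numeric check of the two activations:

x = np.array([-1.0, 0.0, 1.0])
print(sigmoid(x))              # [0.26894142 0.5        0.73105858]
print(sigmoid_linear_unit(x))  # [-0.26894142  0.          0.73105858]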
def base16_encode(data: bytes) -> str:
    """Encode `data` as an uppercase base16 (hex) string."""
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    """Decode an uppercase base16 string back to bytes."""
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(data) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid:\nData does not have an even number of hex digits."
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            "Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters."
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
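Encode and decode should round-trip:

payload = b"Hello World!"
encoded = base16_encode(payload)
print(encoded)  # 48656C6C6F20576F726C6421
assert base16_decode(encoded) == payload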
"""simple docstring""" import logging from pathlib import Path import numpy as np import pytorch_lightning as pl import torch from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint from pytorch_lightning.utilities import rank_zero_only from utils_rag import save_json def lowerCAmelCase__ ( UpperCamelCase__ ): '''simple docstring''' _a : Dict = filter(lambda UpperCamelCase__ : p.requires_grad , model.parameters() ) _a : List[Any] = sum([np.prod(p.size() ) for p in model_parameters] ) return params _snake_case = logging.getLogger(__name__) def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' if metric == "rouge2": _a : Dict = """{val_avg_rouge2:.4f}-{step_count}""" elif metric == "bleu": _a : int = """{val_avg_bleu:.4f}-{step_count}""" elif metric == "em": _a : Optional[Any] = """{val_avg_em:.4f}-{step_count}""" else: raise NotImplementedError( F"""seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this""" """ function.""" ) _a : str = ModelCheckpoint( dirpath=UpperCamelCase__ , filename=UpperCamelCase__ , monitor=F"""val_{metric}""" , mode="""max""" , save_top_k=3 , every_n_epochs=1 , ) return checkpoint_callback def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' return EarlyStopping( monitor=F"""val_{metric}""" , mode="""min""" if """loss""" in metric else """max""" , patience=UpperCamelCase__ , verbose=UpperCamelCase__ , ) class UpperCamelCase ( pl.Callback ): def _lowercase ( self : Optional[int] , UpperCAmelCase__ : str , UpperCAmelCase__ : List[Any] ) -> Tuple: _a : Any = {f"""lr_group_{i}""": param["""lr"""] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )} pl_module.logger.log_metrics(__a ) @rank_zero_only def _lowercase ( self : Optional[int] , UpperCAmelCase__ : pl.Trainer , UpperCAmelCase__ : pl.LightningModule , UpperCAmelCase__ : str , UpperCAmelCase__ : Tuple=True ) -> int: logger.info(f"""***** {type_path} results at step {trainer.global_step:05d} *****""" ) _a : Dict = trainer.callback_metrics trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["""log""", """progress_bar""", """preds"""]} ) # Log results _a : Optional[Any] = Path(pl_module.hparams.output_dir ) if type_path == "test": _a : str = od / """test_results.txt""" _a : List[str] = od / """test_generations.txt""" else: # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json # If people want this it will be easy enough to add back. 
_a : Any = od / f"""{type_path}_results/{trainer.global_step:05d}.txt""" _a : Any = od / f"""{type_path}_generations/{trainer.global_step:05d}.txt""" results_file.parent.mkdir(exist_ok=__a ) generations_file.parent.mkdir(exist_ok=__a ) with open(__a , """a+""" ) as writer: for key in sorted(__a ): if key in ["log", "progress_bar", "preds"]: continue _a : Dict = metrics[key] if isinstance(__a , torch.Tensor ): _a : int = val.item() _a : List[Any] = f"""{key}: {val:.6f}\n""" writer.write(__a ) if not save_generations: return if "preds" in metrics: _a : Dict = """\n""".join(metrics["""preds"""] ) generations_file.open("""w+""" ).write(__a ) @rank_zero_only def _lowercase ( self : int , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Union[str, Any] ) -> Any: try: _a : List[str] = pl_module.model.model.num_parameters() except AttributeError: _a : List[str] = pl_module.model.num_parameters() _a : Tuple = count_trainable_parameters(__a ) # mp stands for million parameters trainer.logger.log_metrics({"""n_params""": npars, """mp""": npars / 1E6, """grad_mp""": n_trainable_pars / 1E6} ) @rank_zero_only def _lowercase ( self : Union[str, Any] , UpperCAmelCase__ : pl.Trainer , UpperCAmelCase__ : pl.LightningModule ) -> Optional[Any]: save_json(pl_module.metrics , pl_module.metrics_save_path ) return self._write_logs(__a , __a , """test""" ) @rank_zero_only def _lowercase ( self : Any , UpperCAmelCase__ : pl.Trainer , UpperCAmelCase__ : int ) -> List[str]: save_json(pl_module.metrics , pl_module.metrics_save_path ) # Uncommenting this will save val generations # return self._write_logs(trainer, pl_module, "valid")
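A wiring sketch for these callbacks, assuming the original (pre-obfuscation) names `Seq2SeqLoggingCallback`, `get_checkpoint_callback`, and `get_early_stopping_callback`; the argument values and `my_module` are placeholders, not part of this module:

import pytorch_lightning as pl

# Placeholder values - the output directory, metric, and patience would
# come from the training script's own hyperparameters.
checkpoint_cb = get_checkpoint_callback("runs/exp1", metric="rouge2")
early_stop_cb = get_early_stopping_callback(metric="rouge2", patience=3)

trainer = pl.Trainer(callbacks=[Seq2SeqLoggingCallback(), checkpoint_cb, early_stop_cb])
trainer.fit(my_module)  # my_module: a pl.LightningModule (placeholder)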
'''simple docstring''' from copy import deepcopy import torch import torch.nn.functional as F from torch.optim import AdamW from torch.optim.lr_scheduler import LambdaLR from torch.utils.data import DataLoader from accelerate.accelerator import Accelerator from accelerate.state import GradientState from accelerate.test_utils import RegressionDataset, RegressionModel from accelerate.utils import DistributedType, is_torch_version, set_seed def _lowerCamelCase ( lowercase : Optional[Any] , lowercase : Optional[int] , lowercase : Optional[Any] , lowercase : Dict ) -> str: for param, grad_param in zip(model_a.parameters() , model_b.parameters() ): if not param.requires_grad: continue if not did_step: # Grads should not be in sync assert ( torch.allclose(param.grad , grad_param.grad ) is False ), F'Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})' else: # Grads should be in sync assert ( torch.allclose(param.grad , grad_param.grad ) is True ), F'Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})' def _lowerCamelCase ( lowercase : Optional[Any] , lowercase : int , lowercase : Tuple , lowercase : Optional[int] , lowercase : int=True ) -> Any: model.train() _a = model(lowercase ) _a = F.mse_loss(lowercase , target.to(output.device ) ) if not do_backward: loss /= accelerator.gradient_accumulation_steps loss.backward() else: accelerator.backward(lowercase ) def _lowerCamelCase ( lowercase : int , lowercase : Tuple=False ) -> List[str]: set_seed(42 ) _a = RegressionModel() _a = deepcopy(lowercase ) _a = RegressionDataset(length=80 ) _a = DataLoader(lowercase , batch_size=16 ) model.to(accelerator.device ) if sched: _a = AdamW(params=model.parameters() , lr=1E-3 ) _a = AdamW(params=ddp_model.parameters() , lr=1E-3 ) _a = LambdaLR(lowercase , lr_lambda=lambda lowercase : epoch**0.65 ) _a = LambdaLR(lowercase , lr_lambda=lambda lowercase : epoch**0.65 ) # Make a copy of `model` if sched: _a , _a , _a , _a = accelerator.prepare(lowercase , lowercase , lowercase , lowercase ) else: _a , _a = accelerator.prepare(lowercase , lowercase ) if sched: return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched) return model, ddp_model, dataloader def _lowerCamelCase ( lowercase : Optional[Any] ) -> Optional[int]: # Test when on a single CPU or GPU that the context manager does nothing _a , _a , _a = get_training_setup(lowercase ) # Use a single batch _a , _a = next(iter(lowercase ) ).values() for iteration in range(3 ): # Gather the distributed inputs and targs for the base model _a , _a = accelerator.gather((ddp_input, ddp_target) ) _a , _a = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(lowercase , lowercase , lowercase , lowercase ) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(lowercase ): step_model(lowercase , lowercase , lowercase , lowercase ) else: # Sync grads step_model(lowercase , lowercase , lowercase , lowercase ) # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync check_model_parameters(lowercase , lowercase , lowercase , lowercase ) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue assert torch.allclose( param.grad , ddp_param.grad ), F'Gradients not in sync when they should be:\nModel grad 
({param.grad}) != DDP grad ({ddp_param.grad})' # Shuffle ddp_input on each iteration torch.manual_seed(1337 + iteration ) _a = ddp_input[torch.randperm(len(lowercase ) )] def _lowerCamelCase ( lowercase : Tuple ) -> Tuple: # Test on distributed setup that context manager behaves properly _a , _a , _a = get_training_setup(lowercase ) # Use a single batch _a , _a = next(iter(lowercase ) ).values() for iteration in range(3 ): # Gather the distributed inputs and targs for the base model _a , _a = accelerator.gather((ddp_input, ddp_target) ) _a , _a = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(lowercase , lowercase , lowercase , lowercase ) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(lowercase ): step_model(lowercase , lowercase , lowercase , lowercase ) else: # Sync grads step_model(lowercase , lowercase , lowercase , lowercase ) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue if iteration % 2 == 0: # Grads should not be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is False ), F'Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})' else: # Grads should be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is True ), F'Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})' # Shuffle ddp_input on each iteration torch.manual_seed(1337 + iteration ) _a = ddp_input[torch.randperm(len(lowercase ) )] def _lowerCamelCase ( lowercase : List[Any]=False , lowercase : Optional[int]=False ) -> Any: _a = Accelerator( split_batches=lowercase , dispatch_batches=lowercase , gradient_accumulation_steps=2 ) # Test that context manager behaves properly _a , _a , _a = get_training_setup(lowercase ) for iteration, batch in enumerate(lowercase ): _a , _a = batch.values() # Gather the distributed inputs and targs for the base model _a , _a = accelerator.gather((ddp_input, ddp_target) ) _a , _a = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(lowercase , lowercase , lowercase , lowercase , lowercase ) # Do "gradient accumulation" (noop) with accelerator.accumulate(lowercase ): step_model(lowercase , lowercase , lowercase , lowercase ) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue if ((iteration + 1) % 2 == 0) or (iteration == len(lowercase ) - 1): # Grads should be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is True ), F'Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})' else: # Grads should not be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is False ), F'Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})' # Shuffle ddp_input on each iteration torch.manual_seed(1337 + iteration ) _a = ddp_input[torch.randperm(len(lowercase ) )] GradientState._reset_state() def _lowerCamelCase ( lowercase : int=False , lowercase : int=False ) -> Dict: _a = Accelerator( split_batches=lowercase , dispatch_batches=lowercase , 
gradient_accumulation_steps=2 ) # Test that context manager behaves properly _a , _a , _a , _a , _a , _a , _a = get_training_setup(lowercase , lowercase ) for iteration, batch in enumerate(lowercase ): _a , _a = batch.values() # Gather the distributed inputs and targs for the base model _a , _a = accelerator.gather((ddp_input, ddp_target) ) _a , _a = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" model.train() ddp_model.train() step_model(lowercase , lowercase , lowercase , lowercase , lowercase ) opt.step() if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(lowercase )): if split_batches: sched.step() else: for _ in range(accelerator.num_processes ): sched.step() opt.zero_grad() # Perform gradient accumulation under wrapper with accelerator.accumulate(lowercase ): step_model(lowercase , lowercase , lowercase , lowercase ) ddp_opt.step() ddp_sched.step() ddp_opt.zero_grad() # Learning rates should be the same assert ( opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"] ), F'Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n' _a = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(lowercase )) if accelerator.num_processes > 1: check_model_parameters(lowercase , lowercase , lowercase , lowercase ) # Shuffle ddp_input on each iteration torch.manual_seed(1337 + iteration ) GradientState._reset_state() def _lowerCamelCase ( ) -> Any: _a = Accelerator() _a = RegressionDataset(length=80 ) _a = DataLoader(lowercase , batch_size=16 ) _a = RegressionDataset(length=96 ) _a = DataLoader(lowercase , batch_size=16 ) _a , _a = accelerator.prepare(lowercase , lowercase ) assert accelerator.gradient_state.active_dataloader is None for iteration, _ in enumerate(lowercase ): assert id(accelerator.gradient_state.active_dataloader ) == id(lowercase ) if iteration < len(lowercase ) - 1: assert not accelerator.gradient_state.end_of_dataloader if iteration == 1: for batch_num, _ in enumerate(lowercase ): assert id(accelerator.gradient_state.active_dataloader ) == id(lowercase ) if batch_num < len(lowercase ) - 1: assert not accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader assert accelerator.gradient_state.active_dataloader is None def _lowerCamelCase ( ) -> Optional[Any]: _a = Accelerator() _a = accelerator.state if state.local_process_index == 0: print("**Test `accumulate` gradient accumulation with dataloader break**" ) test_dataloader_break() if state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print("**Test NOOP `no_sync` context manager**" ) test_noop_sync(lowercase ) if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU): if state.local_process_index == 0: print("**Test Distributed `no_sync` context manager**" ) test_distributed_sync(lowercase ) if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if state.local_process_index == 0: print( "**Test `accumulate` gradient accumulation, " , F'`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**' , ) test_gradient_accumulation(lowercase , lowercase ) # Currently will break on torch 2.0 +, need to investigate why if is_torch_version("<" , "2.0" ) or state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print( 
"**Test `accumulate` gradient accumulation with optimizer and scheduler, " , "`split_batches=False`, `dispatch_batches=False`**" , ) test_gradient_accumulation_with_opt_and_scheduler() if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if not split_batch and not dispatch_batches: continue if state.local_process_index == 0: print( "**Test `accumulate` gradient accumulation with optimizer and scheduler, " , F'`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**' , ) test_gradient_accumulation_with_opt_and_scheduler(lowercase , lowercase ) def _lowerCamelCase ( lowercase : Any ) -> Tuple: # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
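In user code, the pattern these tests exercise reduces to wrapping each training step in `accelerator.accumulate`; a minimal self-contained loop with a toy regression model standing in for a real one:

import torch
from accelerate import Accelerator
from torch.utils.data import DataLoader, TensorDataset

accelerator = Accelerator(gradient_accumulation_steps=2)

# Toy model and data; placeholders for a real training setup.
model = torch.nn.Linear(4, 1)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
dataset = TensorDataset(torch.randn(64, 4), torch.randn(64, 1))
dataloader = DataLoader(dataset, batch_size=16)
model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)

for inputs, targets in dataloader:
    # Gradients are synchronized only every second step (and on the final
    # batch); `accumulate` handles the no_sync bookkeeping internally.
    with accelerator.accumulate(model):
        loss = torch.nn.functional.mse_loss(model(inputs), targets)
        accelerator.backward(loss)
        optimizer.step()
        optimizer.zero_grad()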
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = { 'microsoft/swin-tiny-patch4-window7-224': ( 'https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json' ), # See all Swin models at https://huggingface.co/models?filter=swin } class a__ ( lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" __lowerCamelCase = 'swin' __lowerCamelCase = { 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers', } def __init__( self , lowercase=224 , lowercase=4 , lowercase=3 , lowercase=96 , lowercase=[2, 2, 6, 2] , lowercase=[3, 6, 12, 24] , lowercase=7 , lowercase=4.0 , lowercase=True , lowercase=0.0 , lowercase=0.0 , lowercase=0.1 , lowercase="gelu" , lowercase=False , lowercase=0.02 , lowercase=1e-5 , lowercase=32 , lowercase=None , lowercase=None , **lowercase , ) -> Any: '''simple docstring''' super().__init__(**__a ) A__ = image_size A__ = patch_size A__ = num_channels A__ = embed_dim A__ = depths A__ = len(__a ) A__ = num_heads A__ = window_size A__ = mlp_ratio A__ = qkv_bias A__ = hidden_dropout_prob A__ = attention_probs_dropout_prob A__ = drop_path_rate A__ = hidden_act A__ = use_absolute_embeddings A__ = layer_norm_eps A__ = initializer_range A__ = encoder_stride # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model A__ = int(embed_dim * 2 ** (len(__a ) - 1) ) A__ = ["stem"] + [F'stage{idx}' for idx in range(1 , len(__a ) + 1 )] A__ , A__ = get_aligned_output_features_output_indices( out_features=__a , out_indices=__a , stage_names=self.stage_names ) class a__ ( lowerCamelCase_ ): """simple docstring""" __lowerCamelCase = version.parse('1.11' ) @property def UpperCamelCase ( self ) -> List[str]: '''simple docstring''' return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ] ) @property def UpperCamelCase ( self ) -> Optional[int]: '''simple docstring''' return 1e-4
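For reference, this config instantiates directly from the public `transformers` classes; a minimal sketch:

from transformers import SwinConfig, SwinModel

# Defaults mirror microsoft/swin-tiny-patch4-window7-224.
config = SwinConfig(image_size=224, patch_size=4, embed_dim=96, depths=[2, 2, 6, 2])
model = SwinModel(config)
print(config.hidden_size)  # embed_dim * 2 ** (len(depths) - 1) = 768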
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase_ : Optional[Any] = logging.get_logger(__name__) lowerCAmelCase_ : List[str] = { 'microsoft/trocr-base-handwritten': ( 'https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json' ), # See all TrOCR models at https://huggingface.co/models?filter=trocr } class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ): """simple docstring""" __a ='trocr' __a =['past_key_values'] __a ={ 'num_attention_heads': 'decoder_attention_heads', 'hidden_size': 'd_model', 'num_hidden_layers': 'decoder_layers', } def __init__( self : Optional[int] , __a : Any=5_02_65 , __a : Optional[int]=10_24 , __a : List[Any]=12 , __a : str=16 , __a : int=40_96 , __a : Optional[Any]="gelu" , __a : Union[str, Any]=5_12 , __a : Dict=0.1 , __a : List[str]=0.0 , __a : Union[str, Any]=0.0 , __a : Any=2 , __a : Union[str, Any]=0.02 , __a : Any=0.0 , __a : List[str]=True , __a : Optional[Any]=False , __a : Union[str, Any]=True , __a : Optional[Any]=True , __a : Any=1 , __a : List[Any]=0 , __a : Any=2 , **__a : Optional[Any] , ): _a = vocab_size _a = d_model _a = decoder_layers _a = decoder_attention_heads _a = decoder_ffn_dim _a = activation_function _a = max_position_embeddings _a = dropout _a = attention_dropout _a = activation_dropout _a = init_std _a = decoder_layerdrop _a = use_cache _a = scale_embedding _a = use_learned_position_embeddings _a = layernorm_embedding super().__init__( pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , decoder_start_token_id=__a , **__a , )
import os
import tempfile
import unittest

from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        MODEL_FOR_PRETRAINING_MAPPING,
        NezhaForMaskedLM,
        NezhaForMultipleChoice,
        NezhaForNextSentencePrediction,
        NezhaForPreTraining,
        NezhaForQuestionAnswering,
        NezhaForSequenceClassification,
        NezhaForTokenClassification,
        NezhaModel,
    )
    from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST


class NezhaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=128,
        max_relative_position=32,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return NezhaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NezhaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = NezhaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NezhaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_next_sequence_prediction(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NezhaForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NezhaForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NezhaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = NezhaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = NezhaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = NezhaForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class NezhaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            NezhaModel,
            NezhaForMaskedLM,
            NezhaForMultipleChoice,
            NezhaForNextSentencePrediction,
            NezhaForPreTraining,
            NezhaForQuestionAnswering,
            NezhaForSequenceClassification,
            NezhaForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": NezhaModel,
            "fill-mask": NezhaForMaskedLM,
            "question-answering": NezhaForQuestionAnswering,
            "text-classification": NezhaForSequenceClassification,
            "token-classification": NezhaForTokenClassification,
            "zero-shot": NezhaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = NezhaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NezhaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()

        input_mask = None

        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NezhaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # NezhaForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == NezhaForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "bert.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "bert.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))


@require_torch
class NezhaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_nezha_model(self):
        model = NezhaModel.from_pretrained("sijunhe/nezha-cn-base")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[0.0685, 0.2441, 0.1102], [0.0600, 0.1906, 0.1349], [0.0221, 0.0819, 0.0586]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))

    @slow
    def test_inference_nezha_masked_lm(self):
        model = NezhaForMaskedLM.from_pretrained("sijunhe/nezha-cn-base")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 21128))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-2.7939, -1.7902, -2.2189], [-2.8585, -1.8908, -2.3723], [-2.6499, -1.7750, -2.2558]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))

import argparse
import os
import re


PATH_TO_AUTO_MODULE = "src/transformers/models/auto"

# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
_re_intro_mapping = re.compile(r"[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict")
# re pattern that matches identifiers in mappings
_re_identifier = re.compile(r'\s*\(\s*"(\S[^"]+)"')


def sort_auto_mapping(fname, overwrite: bool = False):
    with open(fname, "r", encoding="utf-8") as f:
        content = f.read()

    lines = content.split("\n")
    new_lines = []
    line_idx = 0
    while line_idx < len(lines):
        if _re_intro_mapping.search(lines[line_idx]) is not None:
            indent = len(re.search(r"^(\s*)\S", lines[line_idx]).groups()[0]) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(" " * indent + "("):
                new_lines.append(lines[line_idx])
                line_idx += 1

            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(" " * indent + ")"):
                        line_idx += 1
                    blocks.append("\n".join(lines[start_idx : line_idx + 1]))
                else:
                    blocks.append(lines[line_idx])
                line_idx += 1

            # Sort blocks by their identifiers
            blocks = sorted(blocks, key=lambda x: _re_identifier.search(x).groups()[0])
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx])
            line_idx += 1

    if overwrite:
        with open(fname, "w", encoding="utf-8") as f:
            f.write("\n".join(new_lines))
    elif "\n".join(new_lines) != content:
        return True


def sort_all_auto_mappings(overwrite: bool = False):
    fnames = [os.path.join(PATH_TO_AUTO_MODULE, f) for f in os.listdir(PATH_TO_AUTO_MODULE) if f.endswith(".py")]
    diffs = [sort_auto_mapping(fname, overwrite=overwrite) for fname in fnames]

    if not overwrite and any(diffs):
        failures = [f for f, d in zip(fnames, diffs) if d]
        raise ValueError(
            f"The following files have auto mappings that need sorting: {', '.join(failures)}. Run `make style` to fix"
            " this."
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_all_auto_mappings(not args.check_only)

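# --- Usage sketch (an illustration, not part of the original script). It assumes
# the module above is importable as `sort_auto_mappings`; the sample mapping and
# temp file are hypothetical. With overwrite=False, sort_auto_mapping returns
# True to flag a file whose mapping entries are out of order.
import tempfile

from sort_auto_mappings import sort_auto_mapping

sample = """FOO_MAPPING_NAMES = OrderedDict(
    [
        ("beta", "B"),
        ("alpha", "A"),
    ]
)
"""
with tempfile.NamedTemporaryFile("w", suffix=".py", delete=False) as tmp:
    tmp.write(sample)
print(sort_auto_mapping(tmp.name, overwrite=False))  # True -> "alpha" should come before "beta"
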
import copy
import inspect
import unittest

import numpy as np
from huggingface_hub import hf_hub_download

from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import (
        MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
        TimesformerForVideoClassification,
        TimesformerModel,
    )
    from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from transformers import VideoMAEImageProcessor


class TimesformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=10,
        num_channels=3,
        patch_size=2,
        num_frames=2,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        num_labels=10,
        initializer_range=0.02,
        attention_type="divided_space_time",
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.attention_type = attention_type
        self.initializer_range = initializer_range
        self.scope = scope
        self.num_labels = num_labels

        # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames) * self.num_patches_per_frame + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size]
        )

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        config = TimesformerConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_frames=self.num_frames,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
            attention_type=self.attention_type,
        )
        config.num_labels = self.num_labels
        return config

    def create_and_check_model(self, config, pixel_values, labels):
        model = TimesformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_video_classification(self, config, pixel_values, labels):
        model = TimesformerForVideoClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify the logits shape
        expected_shape = torch.Size((self.batch_size, self.num_labels))
        self.parent.assertEqual(result.logits.shape, expected_shape)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class TimesformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TimesformerModel, "video-classification": TimesformerForVideoClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = TimesformerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=TimesformerConfig, has_text_modality=False, hidden_size=37
        )

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)

        if return_labels:
            if model_class in get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="TimeSformer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_video_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_video_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TimesformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_attention_outputs(self):
        if not self.has_attentions:
            pass
        else:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            for model_class in self.all_model_classes:
                seq_len = self.model_tester.seq_length
                num_frames = self.model_tester.num_frames

                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = False
                config.return_dict = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                config.output_attentions = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1],
                )
                out_len = len(outputs)

                # Check attention is always last and order is fine
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(out_len + 1, len(outputs))

                self_attentions = outputs.attentions
                self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1],
                )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            seq_length = self.model_tester.seq_length
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)


# We will verify our results on a video of eating spaghetti
def prepare_video():
    video_file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(video_file)
    return list(video)


@require_torch
@require_vision
class TimesformerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        # logits were tested with a different mean and std, so we use the same here
        return (
            VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_for_video_classification(self):
        model = TimesformerForVideoClassification.from_pretrained("facebook/timesformer-base-finetuned-k400").to(
            torch_device
        )

        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video[:8], return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 400))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.3016, -0.7713, -0.4205]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

BIG_BIRD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json",
    "google/bigbird-roberta-large": "https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json",
    "google/bigbird-base-trivia-itc": "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json",
    # See all BigBird models at https://huggingface.co/models?filter=big_bird
}


class BigBirdConfig(PretrainedConfig):
    model_type = "big_bird"

    def __init__(
        self,
        vocab_size=50358,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=4096,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        sep_token_id=66,
        attention_type="block_sparse",
        use_bias=True,
        rescale_embeddings=False,
        block_size=64,
        num_random_blocks=3,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            sep_token_id=sep_token_id,
            **kwargs,
        )

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache

        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
        self.classifier_dropout = classifier_dropout


class BigBirdOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )

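# --- Usage sketch (an illustration, not part of the original module). Assuming
# a released `transformers` package, this builds a block-sparse BigBird config
# with the sparse-attention knobs defined above, then switches to full
# attention, which is the usual fallback for short sequences.
from transformers import BigBirdConfig

config = BigBirdConfig(attention_type="block_sparse", block_size=64, num_random_blocks=3)
print(config.attention_type)  # "block_sparse"

config.attention_type = "original_full"  # dense attention, no block sparsity
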
from timeit import timeit


test_data = {
    "MALAYALAM": True,
    "String": False,
    "rotor": True,
    "level": True,
    "A": True,
    "BB": True,
    "ABC": False,
    "amanaplanacanalpanama": True,  # "a man a plan a canal panama"
}
# Ensure our test data is valid
assert all((key == key[::-1]) is value for key, value in test_data.items())


def is_palindrome_traversal(s: str) -> bool:
    """Check for a palindrome with a two-pointer traversal from both ends."""
    start_i = 0
    end_i = len(s) - 1
    while start_i < end_i:
        if s[start_i] == s[end_i]:
            start_i += 1
            end_i -= 1
        else:
            return False
    return True


def is_palindrome(s: str) -> bool:
    half_length = len(s) // 2
    n = len(s)
    # We need to traverse till half of the length of string
    # as we can get access of the i'th last element from
    # i'th index.
    # eg: [0,1,2,3,4,5] => 4th index can be accessed
    # with the help of 1st index (i==n-i-1)
    # where n is length of string
    return all(s[i] == s[n - i - 1] for i in range(half_length))


def is_palindrome_recursive(s: str) -> bool:
    if len(s) <= 1:
        return True
    if s[0] == s[len(s) - 1]:
        return is_palindrome_recursive(s[1:-1])
    else:
        return False


def is_palindrome_slice(s: str) -> bool:
    return s == s[::-1]


def benchmark_function(name: str) -> None:
    stmt = f"all({name}(key) is value for key, value in test_data.items())"
    setup = f"from __main__ import test_data, {name}"
    number = 500000
    result = timeit(stmt=stmt, setup=setup, number=number)
    print(f"{name:<35} finished {number:,} runs in {result:.5f} seconds")


if __name__ == "__main__":
    for key, value in test_data.items():
        assert is_palindrome(key) is is_palindrome_recursive(key)
        assert is_palindrome(key) is is_palindrome_slice(key)
        print(f"{key:21} {value}")
    print("a man a plan a canal panama")

    # finished 500,000 runs in 0.46793 seconds
    benchmark_function("is_palindrome_slice")
    # finished 500,000 runs in 0.85234 seconds
    benchmark_function("is_palindrome")
    # finished 500,000 runs in 1.32028 seconds
    benchmark_function("is_palindrome_recursive")
    # finished 500,000 runs in 2.08679 seconds
    benchmark_function("is_palindrome_traversal")

import torch
from torch import nn

from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin


class UnCLIPTextProjModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        *,
        clip_extra_context_tokens: int = 4,
        clip_embeddings_dim: int = 768,
        time_embed_dim: int,
        cross_attention_dim: int,
    ):
        super().__init__()

        self.learned_classifier_free_guidance_embeddings = nn.Parameter(torch.zeros(clip_embeddings_dim))

        # parameters for additional clip time embeddings
        self.embedding_proj = nn.Linear(clip_embeddings_dim, time_embed_dim)
        self.clip_image_embeddings_project_to_time_embeddings = nn.Linear(clip_embeddings_dim, time_embed_dim)

        # parameters for encoder hidden states
        self.clip_extra_context_tokens = clip_extra_context_tokens
        self.clip_extra_context_tokens_proj = nn.Linear(
            clip_embeddings_dim, self.clip_extra_context_tokens * cross_attention_dim
        )
        self.encoder_hidden_states_proj = nn.Linear(clip_embeddings_dim, cross_attention_dim)
        self.text_encoder_hidden_states_norm = nn.LayerNorm(cross_attention_dim)

    def forward(self, *, image_embeddings, prompt_embeds, text_encoder_hidden_states, do_classifier_free_guidance):
        if do_classifier_free_guidance:
            # Add the classifier free guidance embeddings to the image embeddings
            image_embeddings_batch_size = image_embeddings.shape[0]
            classifier_free_guidance_embeddings = self.learned_classifier_free_guidance_embeddings.unsqueeze(0)
            classifier_free_guidance_embeddings = classifier_free_guidance_embeddings.expand(
                image_embeddings_batch_size, -1
            )
            image_embeddings = torch.cat([classifier_free_guidance_embeddings, image_embeddings], dim=0)

        # The image embeddings batch size and the text embeddings batch size are equal
        assert image_embeddings.shape[0] == prompt_embeds.shape[0]

        batch_size = prompt_embeds.shape[0]

        # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
        # adding CLIP embeddings to the existing timestep embedding, ...
        time_projected_prompt_embeds = self.embedding_proj(prompt_embeds)
        time_projected_image_embeddings = self.clip_image_embeddings_project_to_time_embeddings(image_embeddings)
        additive_clip_time_embeddings = time_projected_image_embeddings + time_projected_prompt_embeds

        # ... and by projecting CLIP embeddings into four
        # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
        clip_extra_context_tokens = self.clip_extra_context_tokens_proj(image_embeddings)
        clip_extra_context_tokens = clip_extra_context_tokens.reshape(batch_size, -1, self.clip_extra_context_tokens)
        clip_extra_context_tokens = clip_extra_context_tokens.permute(0, 2, 1)

        text_encoder_hidden_states = self.encoder_hidden_states_proj(text_encoder_hidden_states)
        text_encoder_hidden_states = self.text_encoder_hidden_states_norm(text_encoder_hidden_states)
        text_encoder_hidden_states = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states], dim=1)

        return text_encoder_hidden_states, additive_clip_time_embeddings

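# --- Usage sketch (an illustration, not part of the original module). Dimensions
# are arbitrary and the class above is assumed to be in scope. With
# do_classifier_free_guidance=True the learned CFG row is prepended, so the
# image-embedding batch of 1 becomes 2 and must match the text-side batch.
import torch

model = UnCLIPTextProjModel(
    clip_extra_context_tokens=4, clip_embeddings_dim=32, time_embed_dim=64, cross_attention_dim=16
)
image_embeddings = torch.randn(1, 32)                # unguided half only
prompt_embeds = torch.randn(2, 32)                   # matches the CFG-doubled batch
text_encoder_hidden_states = torch.randn(2, 7, 32)   # 7 text tokens

hidden_states, time_embeddings = model(
    image_embeddings=image_embeddings,
    prompt_embeds=prompt_embeds,
    text_encoder_hidden_states=text_encoder_hidden_states,
    do_classifier_free_guidance=True,
)
print(hidden_states.shape)    # torch.Size([2, 11, 16]) -> 4 extra context tokens + 7
print(time_embeddings.shape)  # torch.Size([2, 64])
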
"""simple docstring""" import os from datetime import datetime as dt from github import Github __A = [ 'good first issue', 'feature request', 'wip', ] def __A () ->int: """simple docstring""" lowerCAmelCase__ :Optional[Any] = Github(os.environ['GITHUB_TOKEN'] ) lowerCAmelCase__ :str = g.get_repo('huggingface/accelerate' ) lowerCAmelCase__ :str = repo.get_issues(state='open' ) for issue in open_issues: lowerCAmelCase__ :Dict = sorted([comment for comment in issue.get_comments()] , key=lambda _SCREAMING_SNAKE_CASE : i.created_at , reverse=_SCREAMING_SNAKE_CASE ) lowerCAmelCase__ :Optional[Any] = comments[0] if len(_SCREAMING_SNAKE_CASE ) > 0 else None lowerCAmelCase__ :List[str] = dt.utcnow() lowerCAmelCase__ :str = (current_time - issue.updated_at).days lowerCAmelCase__ :List[str] = (current_time - issue.created_at).days if ( last_comment is not None and last_comment.user.login == "github-actions[bot]" and days_since_updated > 7 and days_since_creation >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # Close issue since it has been 7 days of inactivity since bot mention. issue.edit(state='closed' ) elif ( days_since_updated > 23 and days_since_creation >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # Add stale comment issue.create_comment( 'This issue has been automatically marked as stale because it has not had ' 'recent activity. If you think this still needs to be addressed ' 'please comment on this thread.\n\nPlease note that issues that do not follow the ' '[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) ' 'are likely to be ignored.' ) if __name__ == "__main__": main()
import logging
from pathlib import Path

import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json


def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params


logger = logging.getLogger(__name__)


def get_checkpoint_callback(output_dir, metric):
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=3,
        every_n_epochs=1,
    )
    return checkpoint_callback


def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )


class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(
        self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True
    ) -> None:
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
            results_file.parent.mkdir(exist_ok=True)
            generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer: pl.Trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")

import json
import sys
import tempfile
import unittest
from pathlib import Path

import transformers
from transformers import (
    CONFIG_MAPPING,
    FEATURE_EXTRACTOR_MAPPING,
    AutoConfig,
    AutoFeatureExtractor,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir


sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))

from test_module.custom_configuration import CustomConfig  # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor  # noqa E402


SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir("fixtures")
SAMPLE_FEATURE_EXTRACTION_CONFIG = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
SAMPLE_CONFIG = get_tests_dir("fixtures/dummy-config.json")


class AutoFeatureExtractorTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_feature_extractor_from_model_shortcut(self):
        config = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_directory_from_key(self):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config()

            # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
            config_dict = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR).to_dict()

            config_dict.pop("feature_extractor_type")
            config = Wav2Vec2FeatureExtractor(**config_dict)

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)

            config = AutoFeatureExtractor.from_pretrained(tmpdirname)

            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue("_processor_class" not in dict_as_saved)

        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_file(self):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG)
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoFeatureExtractor.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoFeatureExtractor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_feature_extractor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.",
        ):
            _ = AutoFeatureExtractor.from_pretrained("hf-internal-testing/config-no-model")

    def test_from_pretrained_dynamic_feature_extractor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor"
            )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=False
            )

        feature_extractor = AutoFeatureExtractor.from_pretrained(
            "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=True
        )
        self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")

        # Test feature extractor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(tmp_dir)
            reloaded_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_feature_extractor.__class__.__name__, "NewFeatureExtractor")

    def test_new_feature_extractor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoFeatureExtractor.register(Wav2Vec2Config, Wav2Vec2FeatureExtractor)

            # Now that the config is registered, it can be used as any other config with the auto-API
            feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
            with tempfile.TemporaryDirectory() as tmp_dir:
                feature_extractor.save_pretrained(tmp_dir)
                new_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_feature_extractor, CustomFeatureExtractor)

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_feature_extractor_conflict(self):
        class NewFeatureExtractor(Wav2Vec2FeatureExtractor):
            is_local = True

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, NewFeatureExtractor)
            # If remote code is not set, the default is to use local
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor"
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(feature_extractor.is_local)

            # If remote code is disabled, we load the local one.
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=False
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(feature_extractor.is_local)

            # If remote is enabled, we load from the Hub
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=True
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(not hasattr(feature_extractor, "is_local"))

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]

import math


class SelfOrganizingMap:
    def get_winner(self, weights: list[list[float]], sample: list[int]) -> int:
        """Compute the winning vector by Euclidean distance."""
        d0 = 0.0
        d1 = 0.0
        for i in range(len(sample)):
            d0 += math.pow((sample[i] - weights[0][i]), 2)
            d1 += math.pow((sample[i] - weights[1][i]), 2)
        return 0 if d0 > d1 else 1

    def update(self, weights: list[list[int | float]], sample: list[int], j: int, alpha: float) -> list[list[int | float]]:
        """Update the winning vector."""
        for i in range(len(weights)):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights


def main() -> None:
    # Training Examples ( m, n )
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]

    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]

    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5

    for _ in range(epochs):
        for j in range(len(training_samples)):
            # training sample
            sample = training_samples[j]

            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights, sample)

            # Update the winning vector
            weights = self_organizing_map.update(weights, sample, winner, alpha)

    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights, sample)

    # results
    print(f"Clusters that the test sample belongs to : {winner}")
    print(f"Weights that have been trained : {weights}")


# running the main() function
if __name__ == "__main__":
    main()

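# --- Usage sketch (an illustration, not part of the original module): classifying
# a single sample against untrained weights, using the same 2x4 weight layout
# that main() above initializes.
som = SelfOrganizingMap()
weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
print(som.get_winner(weights, [1, 1, 0, 0]))  # index of the winning cluster (0 or 1)
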
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List

import torch
from tqdm import tqdm

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params


logger = getLogger(__name__)


DEFAULT_DEVICE = "cuda" if torch.cuda.is_available() else "cpu"


def generate_summaries_or_translations(
    examples: List[str],
    out_file: str,
    model_name: str,
    batch_size: int = 8,
    device: str = DEFAULT_DEVICE,
    fp16=False,
    task="summarization",
    prefix=None,
    **generate_kwargs,
) -> Dict:
    fout = Path(out_file).open("w", encoding="utf-8")
    model_name = str(model_name)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).to(device)
    if fp16:
        model = model.half()

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.

    start_time = time.time()
    # update config with task specific params
    use_task_specific_params(model, task)
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    for examples_chunk in tqdm(list(chunks(examples, batch_size))):
        examples_chunk = [prefix + text for text in examples_chunk]
        batch = tokenizer(examples_chunk, return_tensors="pt", truncation=True, padding="longest").to(device)
        summaries = model.generate(
            input_ids=batch.input_ids,
            attention_mask=batch.attention_mask,
            **generate_kwargs,
        )
        dec = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        for hypothesis in dec:
            fout.write(hypothesis + "\n")
            fout.flush()
    fout.close()
    runtime = int(time.time() - start_time)  # seconds
    n_obs = len(examples)
    return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs, 4)}


def datetime_now():
    return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")


def run_generate(verbose=True):
    parser = argparse.ArgumentParser()
    parser.add_argument("model_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("input_path", type=str, help="like cnn_dm/test.source")
    parser.add_argument("save_path", type=str, help="where to save summaries")
    parser.add_argument("--reference_path", type=str, required=False, help="like cnn_dm/test.target")
    parser.add_argument("--score_path", type=str, required=False, default="metrics.json", help="where to save metrics")
    parser.add_argument("--device", type=str, required=False, default=DEFAULT_DEVICE, help="cuda, cuda:1, cpu etc.")
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the begininng of src examples"
    )
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--n_obs", type=int, default=-1, required=False, help="How many observations. Defaults to all."
    )
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--dump-args", action="store_true", help="print the custom hparams with the results")
    parser.add_argument(
        "--info",
        nargs="?",
        type=str,
        const=datetime_now(),
        help=(
            "use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g."
            " lang=en-ru. If no value is passed, the current datetime string will be used."
        ),
    )
    # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
    args, rest = parser.parse_known_args()
    parsed_args = parse_numeric_n_bool_cl_kwargs(rest)
    if parsed_args and verbose:
        print(f"parsed the following generate kwargs: {parsed_args}")
    examples = [" " + x.rstrip() if "t5" in args.model_name else x.rstrip() for x in open(args.input_path).readlines()]
    if args.n_obs > 0:
        examples = examples[: args.n_obs]
    Path(args.save_path).parent.mkdir(exist_ok=True)
    if args.reference_path is None and Path(args.score_path).exists():
        warnings.warn(f"score_path {args.score_path} will be overwritten unless you type ctrl-c.")
    if args.device == "cpu" and args.fp16:
        # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
        raise ValueError("Can't mix --fp16 and --device cpu")
    runtime_metrics = generate_summaries_or_translations(
        examples,
        args.save_path,
        args.model_name,
        batch_size=args.bs,
        device=args.device,
        fp16=args.fp16,
        task=args.task,
        prefix=args.prefix,
        **parsed_args,
    )

    if args.reference_path is None:
        return {}

    # Compute scores
    score_fn = calculate_bleu if "translation" in args.task else calculate_rouge
    output_lns = [x.rstrip() for x in open(args.save_path).readlines()]
    reference_lns = [x.rstrip() for x in open(args.reference_path).readlines()][: len(output_lns)]
    scores = score_fn(output_lns, reference_lns)
    scores.update(runtime_metrics)

    if args.dump_args:
        scores.update(parsed_args)
    if args.info:
        scores["info"] = args.info

    if verbose:
        print(scores)

    if args.score_path is not None:
        json.dump(scores, open(args.score_path, "w"))

    return scores


if __name__ == "__main__":
    # Usage for MT:
    # python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
    run_generate(verbose=True)

import warnings
from typing import List

import numpy as np

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available


class OwlViTProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, query_images=None, padding="max_length", return_tensors="np", **kwargs):
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none."
            )

        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]

            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))

                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)

            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)

            else:
                raise ValueError("Target return tensor type could not be returned")

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs
            ).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def post_process(self, *args, **kwargs):
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor

from typing import Optional, Union

import torch
from torch import nn

from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin


class StableUnCLIPImageNormalizer(ModelMixin, ConfigMixin):
    """Holds the mean and standard deviation of CLIP image embeddings so they
    can be scaled to (and back from) unit variance around zero."""

    @register_to_config
    def __init__(self, embedding_dim: int = 768) -> None:
        super().__init__()
        self.mean = nn.Parameter(torch.zeros(1, embedding_dim))
        self.std = nn.Parameter(torch.ones(1, embedding_dim))

    def to(
        self,
        torch_device: Optional[Union[str, torch.device]] = None,
        torch_dtype: Optional[torch.dtype] = None,
    ):
        self.mean = nn.Parameter(self.mean.to(torch_device).to(torch_dtype))
        self.std = nn.Parameter(self.std.to(torch_device).to(torch_dtype))
        return self

    def scale(self, embeds):
        # Normalize embeddings to zero mean / unit variance.
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds

    def unscale(self, embeds):
        # Invert `scale`.
        embeds = (embeds * self.std) + self.mean
        return embeds
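# Round-trip check (added): with the initial mean (zeros) and std (ones),
# scale() followed by unscale() returns the input unchanged.
if __name__ == "__main__":
    normalizer = StableUnCLIPImageNormalizer(embedding_dim=768)
    embeds = torch.randn(2, 768)
    assert torch.allclose(normalizer.unscale(normalizer.scale(embeds)), embeds, atol=1e-6)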
def harmonic_series(n_term: str) -> list:
    """Return the first ``n_term`` terms of the harmonic series as strings
    ('1', '1/2', '1/3', ...)."""
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series


if __name__ == "__main__":
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
    print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
    print(harmonic_series(nth_term))
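# Added note: the numeric partial sums H_n = 1 + 1/2 + ... + 1/n grow like
# ln(n) + gamma, where gamma ~= 0.57722 is the Euler-Mascheroni constant;
# the gap is roughly 1/(2n). A quick numeric check:
if __name__ == "__main__":
    import math

    n = 10_000
    h_n = sum(1 / k for k in range(1, n + 1))
    print(h_n - (math.log(n) + 0.5772156649015329))  # ~5e-05, i.e. about 1/(2n)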
import torch from diffusers import DDPMScheduler from .test_schedulers import SchedulerCommonTest class lowercase_ ( lowerCamelCase_ ): A__ : int = (DDPMScheduler,) def lowerCamelCase_ ( self , **__UpperCamelCase ): """simple docstring""" UpperCamelCase_ = { """num_train_timesteps""": 1_0_0_0, """beta_start""": 0.0_001, """beta_end""": 0.02, """beta_schedule""": """linear""", """variance_type""": """fixed_small""", """clip_sample""": True, } config.update(**__a ) return config def lowerCamelCase_ ( self ): """simple docstring""" for timesteps in [1, 5, 1_0_0, 1_0_0_0]: self.check_over_configs(num_train_timesteps=__a ) def lowerCamelCase_ ( self ): """simple docstring""" for beta_start, beta_end in zip([0.0_001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ): self.check_over_configs(beta_start=__a , beta_end=__a ) def lowerCamelCase_ ( self ): """simple docstring""" for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=__a ) def lowerCamelCase_ ( self ): """simple docstring""" for variance in ["fixed_small", "fixed_large", "other"]: self.check_over_configs(variance_type=__a ) def lowerCamelCase_ ( self ): """simple docstring""" for clip_sample in [True, False]: self.check_over_configs(clip_sample=__a ) def lowerCamelCase_ ( self ): """simple docstring""" self.check_over_configs(thresholding=__a ) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs( thresholding=__a , prediction_type=__a , sample_max_value=__a , ) def lowerCamelCase_ ( self ): """simple docstring""" for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs(prediction_type=__a ) def lowerCamelCase_ ( self ): """simple docstring""" for t in [0, 5_0_0, 9_9_9]: self.check_over_forward(time_step=__a ) def lowerCamelCase_ ( self ): """simple docstring""" UpperCamelCase_ = self.scheduler_classes[0] UpperCamelCase_ = self.get_scheduler_config() UpperCamelCase_ = scheduler_class(**__a ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(4_8_7 ) - 0.00_979 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(9_9_9 ) - 0.02 ) ) < 1e-5 def lowerCamelCase_ ( self ): """simple docstring""" UpperCamelCase_ = self.scheduler_classes[0] UpperCamelCase_ = self.get_scheduler_config() UpperCamelCase_ = scheduler_class(**__a ) UpperCamelCase_ = len(__a ) UpperCamelCase_ = self.dummy_model() UpperCamelCase_ = self.dummy_sample_deter UpperCamelCase_ = torch.manual_seed(0 ) for t in reversed(range(__a ) ): # 1. predict noise residual UpperCamelCase_ = model(__a , __a ) # 2. 
predict previous mean of sample x_t-1 UpperCamelCase_ = scheduler.step(__a , __a , __a , generator=__a ).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance UpperCamelCase_ = pred_prev_sample UpperCamelCase_ = torch.sum(torch.abs(__a ) ) UpperCamelCase_ = torch.mean(torch.abs(__a ) ) assert abs(result_sum.item() - 258.9_606 ) < 1e-2 assert abs(result_mean.item() - 0.3_372 ) < 1e-3 def lowerCamelCase_ ( self ): """simple docstring""" UpperCamelCase_ = self.scheduler_classes[0] UpperCamelCase_ = self.get_scheduler_config(prediction_type="""v_prediction""" ) UpperCamelCase_ = scheduler_class(**__a ) UpperCamelCase_ = len(__a ) UpperCamelCase_ = self.dummy_model() UpperCamelCase_ = self.dummy_sample_deter UpperCamelCase_ = torch.manual_seed(0 ) for t in reversed(range(__a ) ): # 1. predict noise residual UpperCamelCase_ = model(__a , __a ) # 2. predict previous mean of sample x_t-1 UpperCamelCase_ = scheduler.step(__a , __a , __a , generator=__a ).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance UpperCamelCase_ = pred_prev_sample UpperCamelCase_ = torch.sum(torch.abs(__a ) ) UpperCamelCase_ = torch.mean(torch.abs(__a ) ) assert abs(result_sum.item() - 202.0_296 ) < 1e-2 assert abs(result_mean.item() - 0.2_631 ) < 1e-3 def lowerCamelCase_ ( self ): """simple docstring""" UpperCamelCase_ = self.scheduler_classes[0] UpperCamelCase_ = self.get_scheduler_config() UpperCamelCase_ = scheduler_class(**__a ) UpperCamelCase_ = [1_0_0, 8_7, 5_0, 1, 0] scheduler.set_timesteps(timesteps=__a ) UpperCamelCase_ = scheduler.timesteps for i, timestep in enumerate(__a ): if i == len(__a ) - 1: UpperCamelCase_ = -1 else: UpperCamelCase_ = timesteps[i + 1] UpperCamelCase_ = scheduler.previous_timestep(__a ) UpperCamelCase_ = prev_t.item() self.assertEqual(__a , __a ) def lowerCamelCase_ ( self ): """simple docstring""" UpperCamelCase_ = self.scheduler_classes[0] UpperCamelCase_ = self.get_scheduler_config() UpperCamelCase_ = scheduler_class(**__a ) UpperCamelCase_ = [1_0_0, 8_7, 5_0, 5_1, 0] with self.assertRaises(__a , msg="""`custom_timesteps` must be in descending order.""" ): scheduler.set_timesteps(timesteps=__a ) def lowerCamelCase_ ( self ): """simple docstring""" UpperCamelCase_ = self.scheduler_classes[0] UpperCamelCase_ = self.get_scheduler_config() UpperCamelCase_ = scheduler_class(**__a ) UpperCamelCase_ = [1_0_0, 8_7, 5_0, 1, 0] UpperCamelCase_ = len(__a ) with self.assertRaises(__a , msg="""Can only pass one of `num_inference_steps` or `custom_timesteps`.""" ): scheduler.set_timesteps(num_inference_steps=__a , timesteps=__a ) def lowerCamelCase_ ( self ): """simple docstring""" UpperCamelCase_ = self.scheduler_classes[0] UpperCamelCase_ = self.get_scheduler_config() UpperCamelCase_ = scheduler_class(**__a ) UpperCamelCase_ = [scheduler.config.num_train_timesteps] with self.assertRaises( __a , msg="""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}""" , ): scheduler.set_timesteps(timesteps=__a )
'''simple docstring''' import argparse import logging import os import re import tensorflow as tf from transformers import ( AutoConfig, AutoTokenizer, DataCollatorForLanguageModeling, PushToHubCallback, TFAutoModelForMaskedLM, create_optimizer, ) lowerCAmelCase_ : List[str] = logging.getLogger(__name__) lowerCAmelCase_ : List[Any] = tf.data.AUTOTUNE def _lowerCamelCase ( ) -> Optional[int]: _a = argparse.ArgumentParser(description="Train a masked language model on TPU." ) parser.add_argument( "--pretrained_model_config" , type=lowercase , default="roberta-base" , help="The model config to use. Note that we don't copy the model's weights, only the config!" , ) parser.add_argument( "--tokenizer" , type=lowercase , default="unigram-tokenizer-wikitext" , help="The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size." , ) parser.add_argument( "--per_replica_batch_size" , type=lowercase , default=8 , help="Batch size per TPU core." , ) parser.add_argument( "--no_tpu" , action="store_true" , help="If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances." , ) parser.add_argument( "--tpu_name" , type=lowercase , help="Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs." , default="local" , ) parser.add_argument( "--tpu_zone" , type=lowercase , help="Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes." , ) parser.add_argument( "--gcp_project" , type=lowercase , help="Google cloud project name. Only used for non-Colab TPU nodes." ) parser.add_argument( "--bfloat16" , action="store_true" , help="Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU." , ) parser.add_argument( "--train_dataset" , type=lowercase , help="Path to training dataset to load. If the path begins with `gs://`" " then the dataset will be loaded from a Google Cloud Storage bucket." , ) parser.add_argument( "--shuffle_buffer_size" , type=lowercase , default=2**18 , help="Size of the shuffle buffer (in samples)" , ) parser.add_argument( "--eval_dataset" , type=lowercase , help="Path to evaluation dataset to load. If the path begins with `gs://`" " then the dataset will be loaded from a Google Cloud Storage bucket." , ) parser.add_argument( "--num_epochs" , type=lowercase , default=1 , help="Number of epochs to train for." , ) parser.add_argument( "--learning_rate" , type=lowercase , default=1E-4 , help="Learning rate to use for training." , ) parser.add_argument( "--weight_decay_rate" , type=lowercase , default=1E-3 , help="Weight decay rate to use for training." , ) parser.add_argument( "--max_length" , type=lowercase , default=512 , help="Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py" , ) parser.add_argument( "--mlm_probability" , type=lowercase , default=0.15 , help="Fraction of tokens to mask during training." , ) parser.add_argument("--output_dir" , type=lowercase , required=lowercase , help="Path to save model checkpoints to." ) parser.add_argument("--hub_model_id" , type=lowercase , help="Model ID to upload to on the Hugging Face Hub." 
) _a = parser.parse_args() return args def _lowerCamelCase ( lowercase : Union[str, Any] ) -> Optional[int]: try: if args.tpu_name: _a = tf.distribute.cluster_resolver.TPUClusterResolver( args.tpu_name , zone=args.tpu_zone , project=args.gcp_project ) else: _a = tf.distribute.cluster_resolver.TPUClusterResolver() except ValueError: raise RuntimeError( "Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or " "--gcp_project. When running on a TPU VM, use --tpu_name local." ) tf.config.experimental_connect_to_cluster(lowercase ) tf.tpu.experimental.initialize_tpu_system(lowercase ) return tpu def _lowerCamelCase ( lowercase : List[str] ) -> Any: _a = 0 for file in file_list: _a = file.split("/" )[-1] _a = re.search(r"-\d+-(\d+)\.tfrecord" , lowercase ).group(1 ) _a = int(lowercase ) num_samples += sample_count return num_samples def _lowerCamelCase ( lowercase : Union[str, Any] , lowercase : Tuple , lowercase : List[str] , lowercase : Any , lowercase : Tuple , lowercase : Optional[int]=None ) -> int: _a = count_samples(lowercase ) _a = tf.data.Dataset.from_tensor_slices(lowercase ) if shuffle: _a = dataset.shuffle(len(lowercase ) ) _a = tf.data.TFRecordDataset(lowercase , num_parallel_reads=lowercase ) # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here _a = dataset.apply(tf.data.experimental.assert_cardinality(lowercase ) ) _a = dataset.map(lowercase , num_parallel_calls=lowercase ) if shuffle: assert shuffle_buffer_size is not None _a = dataset.shuffle(args.shuffle_buffer_size ) _a = dataset.batch(lowercase , drop_remainder=lowercase ) _a = dataset.map(lowercase , num_parallel_calls=lowercase ) _a = dataset.prefetch(lowercase ) return dataset def _lowerCamelCase ( lowercase : Union[str, Any] ) -> Dict: if not args.no_tpu: _a = initialize_tpu(lowercase ) _a = tf.distribute.TPUStrategy(lowercase ) else: _a = tf.distribute.OneDeviceStrategy(device="/gpu:0" ) if args.bfloataa: tf.keras.mixed_precision.set_global_policy("mixed_bfloat16" ) _a = AutoTokenizer.from_pretrained(args.tokenizer ) _a = AutoConfig.from_pretrained(args.pretrained_model_config ) _a = tokenizer.vocab_size _a = tf.io.gfile.glob(os.path.join(args.train_dataset , "*.tfrecord" ) ) if not training_records: raise ValueError(F'No .tfrecord files found in {args.train_dataset}.' ) _a = tf.io.gfile.glob(os.path.join(args.eval_dataset , "*.tfrecord" ) ) if not eval_records: raise ValueError(F'No .tfrecord files found in {args.eval_dataset}.' ) _a = count_samples(lowercase ) _a = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync) _a = steps_per_epoch * args.num_epochs with strategy.scope(): _a = TFAutoModelForMaskedLM.from_config(lowercase ) model(model.dummy_inputs ) # Pass some dummy inputs through the model to ensure all the weights are built _a , _a = create_optimizer( num_train_steps=lowercase , num_warmup_steps=total_train_steps // 20 , init_lr=args.learning_rate , weight_decay_rate=args.weight_decay_rate , ) # Transformers models compute the right loss for their task by default when labels are passed, and will # use this for training unless you specify your own loss function in compile(). 
model.compile(optimizer=lowercase , metrics=["accuracy"] ) def decode_fn(lowercase : int ): _a = { "input_ids": tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ), "attention_mask": tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ), } return tf.io.parse_single_example(lowercase , lowercase ) # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can # use their methods in our data pipeline. _a = DataCollatorForLanguageModeling( tokenizer=lowercase , mlm_probability=args.mlm_probability , mlm=lowercase , return_tensors="tf" ) def mask_with_collator(lowercase : List[Any] ): # TF really needs an isin() function _a = ( ~tf.cast(batch["attention_mask"] , tf.bool ) | (batch["input_ids"] == tokenizer.cls_token_id) | (batch["input_ids"] == tokenizer.sep_token_id) ) _a , _a = data_collator.tf_mask_tokens( batch["input_ids"] , vocab_size=len(lowercase ) , mask_token_id=tokenizer.mask_token_id , special_tokens_mask=lowercase , ) return batch _a = args.per_replica_batch_size * strategy.num_replicas_in_sync _a = prepare_dataset( lowercase , decode_fn=lowercase , mask_fn=lowercase , batch_size=lowercase , shuffle=lowercase , shuffle_buffer_size=args.shuffle_buffer_size , ) _a = prepare_dataset( lowercase , decode_fn=lowercase , mask_fn=lowercase , batch_size=lowercase , shuffle=lowercase , ) _a = [] if args.hub_model_id: callbacks.append( PushToHubCallback(output_dir=args.output_dir , hub_model_id=args.hub_model_id , tokenizer=lowercase ) ) model.fit( lowercase , validation_data=lowercase , epochs=args.num_epochs , callbacks=lowercase , ) model.save_pretrained(args.output_dir ) if __name__ == "__main__": lowerCAmelCase_ : Any = parse_args() main(args)
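# Example invocation of the TPU MLM training script above (added). The script
# file name, bucket paths and TPU name are assumptions, not taken from the code;
# every flag shown is defined in the script's argparse block.
#
#   python train_mlm_tpu.py \
#       --pretrained_model_config roberta-base \
#       --tokenizer unigram-tokenizer-wikitext \
#       --train_dataset gs://my-bucket/train \
#       --eval_dataset gs://my-bucket/eval \
#       --output_dir gs://my-bucket/checkpoints \
#       --tpu_name local \
#       --bfloat16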
def to_upper_case(word: str) -> str:
    """Convert every lowercase ASCII letter in ``word`` to uppercase.

    >>> to_upper_case("hello world")
    'HELLO WORLD'
    """
    # Subtracting 32 from the code point maps 'a'..'z' onto 'A'..'Z'.
    return "".join(chr(ord(char) - 32) if "a" <= char <= "z" else char for char in word)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
"""Processor class for LayoutLMv3."""
import warnings
from typing import List, Optional, Union

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class LayoutLMv3Processor(ProcessorMixin):
    """Combines a LayoutLMv3 image processor (optionally running OCR) with a
    LayoutLMv3 tokenizer."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv3ImageProcessor"
    tokenizer_class = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True."
            )

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["pixel_values"] = images

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
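# Usage sketch (added; the checkpoint name and image path are assumptions).
# With the default apply_ocr=True image processor, Tesseract extracts the
# words and boxes, so only the image needs to be supplied.
if __name__ == "__main__":
    from PIL import Image
    from transformers import LayoutLMv3Processor

    processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base")
    document = Image.open("invoice.png").convert("RGB")
    encoding = processor(document, return_tensors="pt")
    print(sorted(encoding.keys()))  # ['attention_mask', 'bbox', 'input_ids', 'pixel_values']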
"""simple docstring""" import os import re import urllib.parse from pathlib import Path from typing import Callable, List, Optional, Union from zipfile import ZipFile from ..utils.file_utils import cached_path, hf_github_url from ..utils.logging import get_logger from ..utils.version import Version _snake_case = get_logger(__name__) class UpperCamelCase : UpperCamelCase : Dict = '''dummy_data''' UpperCamelCase : Optional[Any] = '''datasets''' UpperCamelCase : List[Any] = False def __init__( self : Dict , UpperCAmelCase__ : str , UpperCAmelCase__ : str , UpperCAmelCase__ : Union[Version, str] , UpperCAmelCase__ : Optional[str] = None , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Optional[List[Callable]] = None , ) -> str: _a : Optional[Any] = 0 _a : str = dataset_name _a : List[Any] = cache_dir _a : int = use_local_dummy_data _a : List[str] = config # download_callbacks take a single url as input _a : str = download_callbacks or [] # if False, it doesn't load existing files and it returns the paths of the dummy files relative # to the dummy_data zip file root _a : Tuple = load_existing_dummy_data # TODO(PVP, QL) might need to make this more general _a : Dict = str(__a ) # to be downloaded _a : List[str] = None _a : int = None @property def _lowercase ( self : Dict ) -> str: if self._dummy_file is None: _a : Optional[int] = self.download_dummy_data() return self._dummy_file @property def _lowercase ( self : Dict ) -> Tuple: if self.config is not None: # structure is dummy / config_name / version_name return os.path.join("""dummy""" , self.config.name , self.version_name ) # structure is dummy / version_name return os.path.join("""dummy""" , self.version_name ) @property def _lowercase ( self : Optional[int] ) -> Dict: return os.path.join(self.dummy_data_folder , """dummy_data.zip""" ) def _lowercase ( self : int ) -> Dict: _a : str = ( self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data ) _a : Union[str, Any] = cached_path( __a , cache_dir=self.cache_dir , extract_compressed_file=__a , force_extract=__a ) return os.path.join(__a , self.dummy_file_name ) @property def _lowercase ( self : Tuple ) -> Optional[Any]: return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file ) @property def _lowercase ( self : Union[str, Any] ) -> List[str]: if self._bucket_url is None: _a : Optional[int] = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , """/""" ) ) return self._bucket_url @property def _lowercase ( self : Optional[int] ) -> int: # return full path if its a dir if os.path.isdir(self.dummy_file ): return self.dummy_file # else cut off path to file -> example `xsum`. 
return "/".join(self.dummy_file.replace(os.sep , """/""" ).split("""/""" )[:-1] ) def _lowercase ( self : Dict , UpperCAmelCase__ : Tuple , *UpperCAmelCase__ : Tuple ) -> Union[str, Any]: if self.load_existing_dummy_data: # dummy data is downloaded and tested _a : Any = self.dummy_file else: # dummy data cannot be downloaded and only the path to dummy file is returned _a : Any = self.dummy_file_name # special case when data_url is a dict if isinstance(__a , __a ): return self.create_dummy_data_dict(__a , __a ) elif isinstance(__a , (list, tuple) ): return self.create_dummy_data_list(__a , __a ) else: return self.create_dummy_data_single(__a , __a ) def _lowercase ( self : Any , UpperCAmelCase__ : int , *UpperCAmelCase__ : int ) -> Any: return self.download_and_extract(__a ) def _lowercase ( self : Union[str, Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : int ) -> Tuple: return self.download_and_extract(__a ) def _lowercase ( self : Tuple , UpperCAmelCase__ : List[Any] , *UpperCAmelCase__ : Tuple , **UpperCAmelCase__ : int ) -> List[str]: return path def _lowercase ( self : List[Any] ) -> int: return {} def _lowercase ( self : str , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Union[str, Any] ) -> List[Any]: _a : str = {} for key, single_urls in data_url.items(): for download_callback in self.download_callbacks: if isinstance(__a , __a ): for single_url in single_urls: download_callback(__a ) else: _a : Dict = single_urls download_callback(__a ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus if isinstance(__a , __a ): _a : List[str] = [os.path.join(__a , urllib.parse.quote_plus(Path(__a ).name ) ) for x in single_urls] else: _a : str = single_urls _a : List[str] = os.path.join(__a , urllib.parse.quote_plus(Path(__a ).name ) ) _a : int = value # make sure that values are unique if all(isinstance(__a , __a ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len( dummy_data_dict.values() ): # append key to value to make its name unique _a : int = {key: value + key for key, value in dummy_data_dict.items()} return dummy_data_dict def _lowercase ( self : Optional[Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : str ) -> Union[str, Any]: _a : Optional[Any] = [] # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one _a : List[Any] = all(bool(re.findall("""[0-9]{3,}-of-[0-9]{3,}""" , __a ) ) for url in data_url ) _a : int = all( url.startswith("""https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed""" ) for url in data_url ) if data_url and (is_tf_records or is_pubmed_records): _a : Optional[Any] = [data_url[0]] * len(__a ) for single_url in data_url: for download_callback in self.download_callbacks: download_callback(__a ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus _a : Optional[Any] = os.path.join(__a , urllib.parse.quote_plus(single_url.split("""/""" )[-1] ) ) dummy_data_list.append(__a ) return dummy_data_list def _lowercase ( self : Union[str, Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[Any] ) -> Any: for download_callback in self.download_callbacks: download_callback(__a ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus _a : Optional[Any] = 
os.path.join(__a , urllib.parse.quote_plus(data_url.split("""/""" )[-1] ) ) if os.path.exists(__a ) or not self.load_existing_dummy_data: return value else: # Backward compatibility, maybe deprecate at one point. # For many datasets with single url calls to dl_manager.download_and_extract, # the dummy_data.zip file is actually the zipped downloaded file # while now we expected the dummy_data.zip file to be a directory containing # the downloaded file. return path_to_dummy_data def _lowercase ( self : Dict ) -> int: pass def _lowercase ( self : Dict ) -> str: pass def _lowercase ( self : Optional[int] , UpperCAmelCase__ : Any ) -> List[Any]: def _iter_archive_members(UpperCAmelCase__ : Tuple ): # this preserves the order of the members inside the ZIP archive _a : Dict = Path(self.dummy_file ).parent _a : List[Any] = path.relative_to(__a ) with ZipFile(self.local_path_to_dummy_data ) as zip_file: _a : List[str] = zip_file.namelist() for member in members: if member.startswith(relative_path.as_posix() ): yield dummy_parent_path.joinpath(__a ) _a : Any = Path(__a ) _a : Union[str, Any] = _iter_archive_members(__a ) if self.use_local_dummy_data else path.rglob("""*""" ) for file_path in file_paths: if file_path.is_file() and not file_path.name.startswith((""".""", """__""") ): yield file_path.relative_to(__a ).as_posix(), file_path.open("""rb""" ) def _lowercase ( self : Tuple , UpperCAmelCase__ : Optional[int] ) -> Any: if not isinstance(__a , __a ): _a : List[Any] = [paths] for path in paths: if os.path.isfile(__a ): if os.path.basename(__a ).startswith((""".""", """__""") ): return yield path else: for dirpath, dirnames, filenames in os.walk(__a ): if os.path.basename(__a ).startswith((""".""", """__""") ): continue dirnames.sort() for filename in sorted(__a ): if filename.startswith((""".""", """__""") ): continue yield os.path.join(__a , __a )
from ....utils import logging


logger = logging.get_logger(__name__)


class MMBTConfig:
    """Configuration wrapper: copies the fields of a text `config` and adds the
    multimodal-specific `modal_hidden_size` (and, optionally, `num_labels`)."""

    def __init__(self, config, num_labels=None, modal_hidden_size=2048):
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/trocr-base-handwritten": (
        "https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"
    ),
    # See all TrOCR models at https://huggingface.co/models?filter=trocr
}


class TrOCRConfig(PretrainedConfig):
    """Configuration for the TrOCR text decoder."""

    model_type = "trocr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "decoder_attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "decoder_layers",
    }

    def __init__(
        self,
        vocab_size=50265,
        d_model=1024,
        decoder_layers=12,
        decoder_attention_heads=16,
        decoder_ffn_dim=4096,
        activation_function="gelu",
        max_position_embeddings=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        decoder_start_token_id=2,
        init_std=0.02,
        decoder_layerdrop=0.0,
        use_cache=True,
        scale_embedding=False,
        use_learned_position_embeddings=True,
        layernorm_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
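# Usage sketch (added): the attribute_map above aliases the generic config
# names onto the decoder-specific ones; the sizes below are illustrative.
if __name__ == "__main__":
    config = TrOCRConfig(d_model=256, decoder_layers=4, decoder_attention_heads=8)
    assert config.hidden_size == 256  # resolved through attribute_map -> d_model
    assert config.num_hidden_layers == 4  # resolved through attribute_map -> decoder_layers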
def solution(n: int = 100) -> int:
    """Return the difference between the square of the sum and the sum of the
    squares of the first ``n`` natural numbers (Project Euler problem 6)."""
    sum_of_squares = 0
    sum_of_ints = 0
    for i in range(1, n + 1):
        sum_of_squares += i**2
        sum_of_ints += i
    return sum_of_ints**2 - sum_of_squares


if __name__ == "__main__":
    print(f"{solution() = }")
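# Added note: the loop above can be replaced by the closed forms
#   sum i = n(n+1)/2   and   sum i^2 = n(n+1)(2n+1)/6,
# giving the answer in O(1) time instead of O(n).
def solution_closed_form(n: int = 100) -> int:
    return (n * (n + 1) // 2) ** 2 - n * (n + 1) * (2 * n + 1) // 6


if __name__ == "__main__":
    assert solution_closed_form(100) == solution(100) == 25164150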
from ....utils import logging


_logger = logging.get_logger(__name__)


class MMBTConfig:
    """Same deprecated multimodal config wrapper as above: copies the fields of
    a text `config` and adds `modal_hidden_size` (and, optionally, `num_labels`)."""

    def __init__(self, config, num_labels=None, modal_hidden_size=2048):
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
def is_palindrome(num: int) -> bool:
    """Return True if ``num`` reads the same backwards in base 10.

    >>> is_palindrome(121)
    True
    >>> is_palindrome(123)
    False
    >>> is_palindrome(-121)
    False
    """
    if num < 0:
        return False
    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10
    return num_copy == rev_num


if __name__ == "__main__":
    import doctest

    doctest.testmod()
import numpy as np
import torch
from torch.utils.data import DataLoader

from accelerate.utils.dataclasses import DistributedType


class RegressionDataset:
    """Synthetic dataset for y = a * x + b with Gaussian noise."""

    def __init__(self, a=2, b=3, length=64, seed=None):
        rng = np.random.default_rng(seed)
        self.length = length
        self.x = rng.normal(size=(length,)).astype(np.float32)
        self.y = a * self.x + b + rng.normal(scale=0.1, size=(length,)).astype(np.float32)

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return {"x": self.x[i], "y": self.y[i]}


class RegressionModel4XPU(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.b = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a[0] + self.b[0]


class RegressionModel(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor(a).float())
        self.b = torch.nn.Parameter(torch.tensor(b).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a + self.b


def mocked_dataloaders(accelerator, batch_size=16):
    from datasets import load_dataset
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    data_files = {"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"}
    datasets = load_dataset("csv", data_files=data_files)
    label_list = datasets["train"].unique("label")
    label_to_id = {v: i for i, v in enumerate(label_list)}

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples["sentence1"], examples["sentence2"], truncation=True, max_length=None, padding="max_length"
        )
        if "label" in examples:
            outputs["labels"] = [label_to_id[l] for l in examples["label"]]
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        remove_columns=["sentence1", "sentence2", "label"],
    )

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders. The tiny fixed batch sizes (2 and 1) are
    # deliberate: this helper mocks real dataloaders for fast tests.
    train_dataloader = DataLoader(tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=2)
    eval_dataloader = DataLoader(tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=1)

    return train_dataloader, eval_dataloader
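# Usage sketch (added): wiring the mocked dataloaders above through an
# Accelerator. The MRPC fixture CSVs hard-coded in `mocked_dataloaders` must
# exist on disk for this to run.
if __name__ == "__main__":
    from accelerate import Accelerator

    accelerator = Accelerator()
    train_dl, eval_dl = mocked_dataloaders(accelerator)
    train_dl, eval_dl = accelerator.prepare(train_dl, eval_dl)
    print(len(train_dl), len(eval_dl))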
from typing import TYPE_CHECKING

from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable


_import_structure = {"configuration_gpt_neox": ["GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXConfig"]}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_gpt_neox_fast"] = ["GPTNeoXTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_gpt_neox"] = [
        "GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoXForCausalLM",
        "GPTNeoXForQuestionAnswering",
        "GPTNeoXForSequenceClassification",
        "GPTNeoXForTokenClassification",
        "GPTNeoXLayer",
        "GPTNeoXModel",
        "GPTNeoXPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_neox import (
            GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTNeoXForCausalLM,
            GPTNeoXForQuestionAnswering,
            GPTNeoXForSequenceClassification,
            GPTNeoXForTokenClassification,
            GPTNeoXLayer,
            GPTNeoXModel,
            GPTNeoXPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "bigcode/gpt_bigcode-santacoder": "https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json",
}


class GPTBigCodeConfig(PretrainedConfig):
    """Configuration for GPTBigCode models."""

    model_type = "gpt_bigcode"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50257,
        n_positions=1024,
        n_embd=768,
        n_layer=12,
        n_head=12,
        n_inner=None,
        activation_function="gelu_pytorch_tanh",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        attention_softmax_in_fp32=True,
        scale_attention_softmax_in_fp32=True,
        multi_query=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
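# Usage sketch (added): the layer counts below are illustrative, not taken
# from any checkpoint; `multi_query=True` is what distinguishes this
# architecture from vanilla GPT-2-style multi-head attention.
if __name__ == "__main__":
    config = GPTBigCodeConfig(n_embd=2048, n_layer=24, n_head=16, multi_query=True)
    assert config.hidden_size == 2048  # resolved through attribute_map -> n_embd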
'''simple docstring''' import json import sys import tempfile import unittest from pathlib import Path import transformers from transformers import ( CONFIG_MAPPING, FEATURE_EXTRACTOR_MAPPING, AutoConfig, AutoFeatureExtractor, WavaVecaConfig, WavaVecaFeatureExtractor, ) from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils')) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402 lowerCAmelCase_ : Any = get_tests_dir('fixtures') lowerCAmelCase_ : Union[str, Any] = get_tests_dir('fixtures/dummy_feature_extractor_config.json') lowerCAmelCase_ : Dict = get_tests_dir('fixtures/dummy-config.json') class __SCREAMING_SNAKE_CASE (unittest.TestCase ): """simple docstring""" def UpperCamelCase__ ( self : Optional[int] ): _a = 0 def UpperCamelCase__ ( self : str ): _a = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h" ) self.assertIsInstance(__a , __a ) def UpperCamelCase__ ( self : Tuple ): _a = AutoFeatureExtractor.from_pretrained(__a ) self.assertIsInstance(__a , __a ) def UpperCamelCase__ ( self : List[Any] ): with tempfile.TemporaryDirectory() as tmpdirname: _a = WavaVecaConfig() # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally _a = AutoFeatureExtractor.from_pretrained(__a ).to_dict() config_dict.pop("feature_extractor_type" ) _a = WavaVecaFeatureExtractor(**__a ) # save in new folder model_config.save_pretrained(__a ) config.save_pretrained(__a ) _a = AutoFeatureExtractor.from_pretrained(__a ) # make sure private variable is not incorrectly saved _a = json.loads(config.to_json_string() ) self.assertTrue("_processor_class" not in dict_as_saved ) self.assertIsInstance(__a , __a ) def UpperCamelCase__ ( self : Tuple ): _a = AutoFeatureExtractor.from_pretrained(__a ) self.assertIsInstance(__a , __a ) def UpperCamelCase__ ( self : Union[str, Any] ): with self.assertRaisesRegex( __a , "bert-base is not a local folder and is not a valid model identifier" ): _a = AutoFeatureExtractor.from_pretrained("bert-base" ) def UpperCamelCase__ ( self : Optional[Any] ): with self.assertRaisesRegex( __a , r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ): _a = AutoFeatureExtractor.from_pretrained(__a , revision="aaaaaa" ) def UpperCamelCase__ ( self : List[Any] ): with self.assertRaisesRegex( __a , "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json." , ): _a = AutoFeatureExtractor.from_pretrained("hf-internal-testing/config-no-model" ) def UpperCamelCase__ ( self : List[Any] ): # If remote code is not set, we will time out when asking whether to load the model. with self.assertRaises(__a ): _a = AutoFeatureExtractor.from_pretrained( "hf-internal-testing/test_dynamic_feature_extractor" ) # If remote code is disabled, we can't load this config. with self.assertRaises(__a ): _a = AutoFeatureExtractor.from_pretrained( "hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=__a ) _a = AutoFeatureExtractor.from_pretrained( "hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=__a ) self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" ) # Test feature extractor can be reloaded. 
with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained(__a ) _a = AutoFeatureExtractor.from_pretrained(__a , trust_remote_code=__a ) self.assertEqual(reloaded_feature_extractor.__class__.__name__ , "NewFeatureExtractor" ) def UpperCamelCase__ ( self : Any ): try: AutoConfig.register("custom" , __a ) AutoFeatureExtractor.register(__a , __a ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(__a ): AutoFeatureExtractor.register(__a , __a ) # Now that the config is registered, it can be used as any other config with the auto-API _a = CustomFeatureExtractor.from_pretrained(__a ) with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained(__a ) _a = AutoFeatureExtractor.from_pretrained(__a ) self.assertIsInstance(__a , __a ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig] def UpperCamelCase__ ( self : Tuple ): class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ): """simple docstring""" __a =True try: AutoConfig.register("custom" , __a ) AutoFeatureExtractor.register(__a , __a ) # If remote code is not set, the default is to use local _a = AutoFeatureExtractor.from_pretrained( "hf-internal-testing/test_dynamic_feature_extractor" ) self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" ) self.assertTrue(feature_extractor.is_local ) # If remote code is disabled, we load the local one. _a = AutoFeatureExtractor.from_pretrained( "hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=__a ) self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" ) self.assertTrue(feature_extractor.is_local ) # If remote is enabled, we load from the Hub _a = AutoFeatureExtractor.from_pretrained( "hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=__a ) self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" ) self.assertTrue(not hasattr(__a , "is_local" ) ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
"""simple docstring""" import contextlib import faulthandler import io import multiprocessing import os import platform import signal import tempfile def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->List[str]: """simple docstring""" lowerCAmelCase__ :List[str] = multiprocessing.Manager() lowerCAmelCase__ :Any = manager.list() lowerCAmelCase__ :Optional[int] = multiprocessing.Process(target=_SCREAMING_SNAKE_CASE , args=(check_program, result, timeout) ) p.start() p.join(timeout=timeout + 1 ) if p.is_alive(): p.kill() if not result: result.append('timed out' ) return { "task_id": task_id, "passed": result[0] == "passed", "result": result[0], "completion_id": completion_id, } def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Dict: """simple docstring""" with create_tempdir(): # These system calls are needed when cleaning up tempdir. import os import shutil lowerCAmelCase__ :List[Any] = shutil.rmtree lowerCAmelCase__ :List[Any] = os.rmdir lowerCAmelCase__ :Optional[int] = os.chdir # Disable functionalities that can make destructive changes to the test. reliability_guard() # Run program. try: lowerCAmelCase__ :List[str] = {} with swallow_io(): with time_limit(_SCREAMING_SNAKE_CASE ): exec(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) result.append('passed' ) except TimeoutException: result.append('timed out' ) except BaseException as e: result.append(F"failed: {e}" ) # Needed for cleaning up. lowerCAmelCase__ :Any = rmtree lowerCAmelCase__ :List[str] = rmdir lowerCAmelCase__ :List[Any] = chdir @contextlib.contextmanager def __A (_SCREAMING_SNAKE_CASE ) ->Dict: """simple docstring""" def signal_handler(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): raise TimeoutException('Timed out!' 
) signal.setitimer(signal.ITIMER_REAL , _SCREAMING_SNAKE_CASE ) signal.signal(signal.SIGALRM , _SCREAMING_SNAKE_CASE ) try: yield finally: signal.setitimer(signal.ITIMER_REAL , 0 ) @contextlib.contextmanager def __A () ->Any: """simple docstring""" lowerCAmelCase__ :Optional[Any] = WriteOnlyStringIO() with contextlib.redirect_stdout(_SCREAMING_SNAKE_CASE ): with contextlib.redirect_stderr(_SCREAMING_SNAKE_CASE ): with redirect_stdin(_SCREAMING_SNAKE_CASE ): yield @contextlib.contextmanager def __A () ->int: """simple docstring""" with tempfile.TemporaryDirectory() as dirname: with chdir(_SCREAMING_SNAKE_CASE ): yield dirname class _lowerCAmelCase ( lowerCamelCase_ ): """simple docstring""" pass class _lowerCAmelCase ( io.StringIO ): """simple docstring""" def snake_case ( self , *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' raise OSError def snake_case ( self , *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' raise OSError def snake_case ( self , *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' raise OSError def snake_case ( self , *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' return False class _lowerCAmelCase ( contextlib._RedirectStream ): # type: ignore """simple docstring""" __magic_name__ :Tuple = """stdin""" @contextlib.contextmanager def __A (_SCREAMING_SNAKE_CASE ) ->Dict: """simple docstring""" if root == ".": yield return lowerCAmelCase__ :Any = os.getcwd() os.chdir(_SCREAMING_SNAKE_CASE ) try: yield except BaseException as exc: raise exc finally: os.chdir(_SCREAMING_SNAKE_CASE ) def __A (_SCREAMING_SNAKE_CASE=None ) ->Optional[int]: """simple docstring""" if maximum_memory_bytes is not None: import resource resource.setrlimit(resource.RLIMIT_AS , (maximum_memory_bytes, maximum_memory_bytes) ) resource.setrlimit(resource.RLIMIT_DATA , (maximum_memory_bytes, maximum_memory_bytes) ) if not platform.uname().system == "Darwin": resource.setrlimit(resource.RLIMIT_STACK , (maximum_memory_bytes, maximum_memory_bytes) ) faulthandler.disable() import builtins lowerCAmelCase__ :Optional[Any] = None lowerCAmelCase__ :List[str] = None import os lowerCAmelCase__ :int = '1' lowerCAmelCase__ :str = None lowerCAmelCase__ :str = None lowerCAmelCase__ :Optional[int] = None lowerCAmelCase__ :Optional[int] = None lowerCAmelCase__ :Any = None lowerCAmelCase__ :Optional[Any] = None lowerCAmelCase__ :List[Any] = None lowerCAmelCase__ :Tuple = None lowerCAmelCase__ :Union[str, Any] = None lowerCAmelCase__ :int = None lowerCAmelCase__ :Union[str, Any] = None lowerCAmelCase__ :Optional[int] = None lowerCAmelCase__ :Optional[Any] = None lowerCAmelCase__ :Tuple = None lowerCAmelCase__ :Union[str, Any] = None lowerCAmelCase__ :List[str] = None lowerCAmelCase__ :int = None lowerCAmelCase__ :Optional[int] = None lowerCAmelCase__ :int = None lowerCAmelCase__ :List[str] = None lowerCAmelCase__ :Optional[Any] = None lowerCAmelCase__ :str = None lowerCAmelCase__ :List[str] = None lowerCAmelCase__ :List[str] = None lowerCAmelCase__ :Any = None lowerCAmelCase__ :List[Any] = None lowerCAmelCase__ :str = None import shutil lowerCAmelCase__ :int = None lowerCAmelCase__ :Tuple = None lowerCAmelCase__ :int = None import subprocess lowerCAmelCase__ :Any = None # type: ignore lowerCAmelCase__ :List[str] = None import sys lowerCAmelCase__ :List[str] = None lowerCAmelCase__ :Dict = None lowerCAmelCase__ :List[Any] = None lowerCAmelCase__ :Optional[int] = None lowerCAmelCase__ :Dict = None
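# Usage sketch (added): in the upstream Hugging Face `code_eval` metric the
# first function above (the one that spawns the sandboxed worker process) is
# named `check_correctness`; that name and signature are assumptions made for
# illustration, since the identifiers in this copy are obfuscated.
if __name__ == "__main__":
    program = "def add(a, b):\n    return a + b\n\nassert add(1, 2) == 3\n"
    result = check_correctness(program, timeout=3.0, task_id="demo/0", completion_id=0)
    print(result)  # expected: {'task_id': 'demo/0', 'passed': True, 'result': 'passed', ...}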
"""Duplicate corpus entry of the GPTBigCode configuration above, restored the same way."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "bigcode/gpt_bigcode-santacoder": "https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json",
}


class GPTBigCodeConfig(PretrainedConfig):
    model_type = "gpt_bigcode"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50257,
        n_positions=1024,
        n_embd=768,
        n_layer=12,
        n_head=12,
        n_inner=None,
        activation_function="gelu_pytorch_tanh",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        attention_softmax_in_fp32=True,
        scale_attention_softmax_in_fp32=True,
        multi_query=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
import itertools
from dataclasses import dataclass
from typing import Optional

import pandas as pd
import pyarrow as pa

import datasets
from datasets.table import table_cast


@dataclass
class PandasConfig(datasets.BuilderConfig):
    """BuilderConfig for Pandas."""

    features: Optional[datasets.Features] = None


class Pandas(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = PandasConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in data_files."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        for i, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                pa_table = pa.Table.from_pandas(pd.read_pickle(f))
                yield i, self._cast_table(pa_table)
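# Usage sketch (added): the builder above backs the packaged "pandas" loader
# in `datasets`, which reads pickled DataFrames; the file name is an assumption.
if __name__ == "__main__":
    import pandas as pd
    from datasets import load_dataset

    pd.DataFrame({"a": [1, 2, 3]}).to_pickle("demo.pkl")
    ds = load_dataset("pandas", data_files="demo.pkl", split="train")
    print(ds[0])  # {'a': 1}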
def perfect_cube(n: int) -> bool:
    """Return True if non-negative ``n`` is a perfect cube."""
    # Round the float cube root before comparing: 64 ** (1 / 3) evaluates to
    # 3.9999999999999996, so the naive exact-float comparison misses real cubes.
    val = round(n ** (1 / 3))
    return val * val * val == n


if __name__ == "__main__":
    print(perfect_cube(27))  # True
    print(perfect_cube(4))  # False
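# Added check (not in the original file): exercises the rounding fix above on
# a cube whose floating-point root undershoots its integer value.
if __name__ == "__main__":
    assert perfect_cube(64)
    assert perfect_cube(343)
    assert not perfect_cube(63)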
import io import os import unicodedata from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging _lowercase: List[Any] = logging.get_logger(__name__) _lowercase: str = '▁' _lowercase: Dict = {'vocab_file': 'vocab.txt', 'sentencepiece_model_ckpt': 'sentencepiece.bpe.model'} _lowercase: str = { 'sentencepiece_model_file': 'sentencepiece.bpe.model', 'vocab_file': 'vocab.txt', } _lowercase: Any = { 'vocab_file': { 'ernie-m-base': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt', 'ernie-m-large': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt', }, 'sentencepiece_model_file': { 'ernie-m-base': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model', 'ernie-m-large': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model', }, } _lowercase: Any = { 'ernie-m-base': 514, 'ernie-m-large': 514, } _lowercase: Any = { 'ernie-m-base': {'do_lower_case': False}, 'ernie-m-large': {'do_lower_case': False}, } class _lowercase ( lowerCamelCase_ ): """simple docstring""" __A = ["input_ids"] __A = VOCAB_FILES_NAMES __A = PRETRAINED_INIT_CONFIGURATION __A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __A = PRETRAINED_VOCAB_FILES_MAP __A = RESOURCE_FILES_NAMES def __init__(self , lowerCamelCase_ , lowerCamelCase_=None , lowerCamelCase_=False , lowerCamelCase_="utf8" , lowerCamelCase_="[UNK]" , lowerCamelCase_="[SEP]" , lowerCamelCase_="[PAD]" , lowerCamelCase_="[CLS]" , lowerCamelCase_="[MASK]" , lowerCamelCase_ = None , **lowerCamelCase_ , ): """simple docstring""" a = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=__a , unk_token=__a , sep_token=__a , pad_token=__a , cls_token=__a , mask_token=__a , vocab_file=__a , encoding=__a , sp_model_kwargs=self.sp_model_kwargs , **__a , ) a = do_lower_case a = sentencepiece_model_ckpt a = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(__a ) # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning if vocab_file is not None: a = self.load_vocab(filepath=__a ) else: a = {self.sp_model.id_to_piece(__a ): id for id in range(self.sp_model.get_piece_size() )} a = {v: k for k, v in self.vocab.items()} def UpperCamelCase_ (self , lowerCamelCase_ ): """simple docstring""" if text is None: return None a = self.tokenize(__a ) a , a = "", [] for i, ch in enumerate(__a ): if ch in self.SP_CHAR_MAPPING: a = self.SP_CHAR_MAPPING.get(__a ) else: a = unicodedata.normalize("NFKC" , __a ) if self.is_whitespace(__a ): continue normalized_text += ch char_mapping.extend([i] * len(__a ) ) a , a , a = normalized_text, [], 0 if self.do_lower_case: a = text.lower() for token in split_tokens: if token[:1] == "▁": a = token[1:] a = text[offset:].index(__a ) + offset a = start + len(__a ) token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) ) a = end return token_mapping @property def UpperCamelCase_ (self ): """simple docstring""" return len(self.vocab ) def UpperCamelCase_ (self ): """simple docstring""" return dict(self.vocab , **self.added_tokens_encoder ) def __getstate__(self ): """simple docstring""" a = self.__dict__.copy() a = None return state def __setstate__(self , lowerCamelCase_ ): """simple docstring""" a = d # for backward compatibility if not hasattr(self , "sp_model_kwargs" ): a = {} a = spm.SentencePieceProcessor(**self.sp_model_kwargs ) 
self.sp_model.Load(self.sentencepiece_model_ckpt ) def UpperCamelCase_ (self , lowerCamelCase_ ): """simple docstring""" return "".join((self.SP_CHAR_MAPPING.get(__a , __a ) for c in text) ) def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_=False , lowerCamelCase_=64 , lowerCamelCase_=0.1 ): """simple docstring""" if self.sp_model_kwargs.get("enable_sampling" ) is True: a = True if self.sp_model_kwargs.get("alpha" ) is not None: a = self.sp_model_kwargs.get("alpha" ) if self.sp_model_kwargs.get("nbest_size" ) is not None: a = self.sp_model_kwargs.get("nbest_size" ) if not enable_sampling: a = self.sp_model.EncodeAsPieces(__a ) else: a = self.sp_model.SampleEncodeAsPieces(__a , __a , __a ) a = [] for pi, piece in enumerate(__a ): if piece == SPIECE_UNDERLINE: if not pieces[pi + 1].startswith(__a ) and pi != 0: new_pieces.append(__a ) continue else: continue a = 0 for i, chunk in enumerate(__a ): if chunk == SPIECE_UNDERLINE: continue if self.is_ch_char(__a ) or self.is_punct(__a ): if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE: new_pieces.append(piece[lst_i:i] ) new_pieces.append(__a ) a = i + 1 elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit(): if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE: new_pieces.append(piece[lst_i:i] ) a = i elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit(): if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE: new_pieces.append(piece[lst_i:i] ) a = i if len(__a ) > lst_i: new_pieces.append(piece[lst_i:] ) return new_pieces def UpperCamelCase_ (self , lowerCamelCase_ ): """simple docstring""" a = "".join(__a ).replace(__a , " " ).strip() return out_string def UpperCamelCase_ (self , lowerCamelCase_ ): """simple docstring""" a = self.convert_ids_to_tokens(__a ) a = "".join(__a ).replace(__a , " " ).strip() return out_string def UpperCamelCase_ (self , lowerCamelCase_ ): """simple docstring""" return self.vocab.get(__a , self.vocab.get(self.unk_token ) ) def UpperCamelCase_ (self , lowerCamelCase_ ): """simple docstring""" return self.reverse_vocab.get(__a , self.unk_token ) def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_=None ): """simple docstring""" if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] a = [self.cls_token_id] a = [self.sep_token_id] return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_=None ): """simple docstring""" if offset_mapping_a is None: return [(0, 0)] + offset_mapping_a + [(0, 0)] return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)] def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_=None , lowerCamelCase_=False ): """simple docstring""" if already_has_special_tokens: if token_ids_a is not None: raise ValueError( "You should not supply a second sequence if the provided sequence of " "ids is already formatted with special tokens for the model." 
) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is not None: return [1] + ([0] * len(__a )) + [1, 1] + ([0] * len(__a )) + [1] return [1] + ([0] * len(__a )) + [1] def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ = None ): """simple docstring""" if token_ids_a is None: # [CLS] X [SEP] return (len(__a ) + 2) * [0] # [CLS] A [SEP] [SEP] B [SEP] return [0] * (len(__a ) + 1) + [1] * (len(__a ) + 3) def UpperCamelCase_ (self , lowerCamelCase_ ): """simple docstring""" if "\u4e00" <= char <= "\u9fff": return True return False def UpperCamelCase_ (self , lowerCamelCase_ ): """simple docstring""" if ("a" <= char <= "z") or ("A" <= char <= "Z"): return True return False def UpperCamelCase_ (self , lowerCamelCase_ ): """simple docstring""" if char in ",;:.?!~,;:。?!《》【】": return True return False def UpperCamelCase_ (self , lowerCamelCase_ ): """simple docstring""" if char == " " or char == "\t" or char == "\n" or char == "\r": return True if len(__a ) == 1: a = unicodedata.category(__a ) if cat == "Zs": return True return False def UpperCamelCase_ (self , lowerCamelCase_ ): """simple docstring""" a = {} with io.open(__a , "r" , encoding="utf-8" ) as f: for index, line in enumerate(__a ): a = line.rstrip("\n" ) a = int(__a ) return token_to_idx def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ = None ): """simple docstring""" a = 0 if os.path.isdir(__a ): a = os.path.join( __a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) else: a = (filename_prefix + "-" if filename_prefix else "") + save_directory with open(__a , "w" , encoding="utf-8" ) as writer: for token, token_index in sorted(self.vocab.items() , key=lambda lowerCamelCase_ : kv[1] ): if index != token_index: logger.warning( F'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.''' " Please check that the vocabulary is not corrupted!" ) a = token_index writer.write(token + "\n" ) index += 1 a = os.path.join(__a , "sentencepiece.bpe.model" ) with open(__a , "wb" ) as fi: a = self.sp_model.serialized_model_proto() fi.write(__a ) return (vocab_file,)
227
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)

DETA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "ut/deta": "https://huggingface.co/ut/deta/resolve/main/config.json",
}


class DetaConfig(PretrainedConfig):
    model_type = "deta"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        backbone_config=None,
        num_queries=900,
        max_position_embeddings=2048,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        num_feature_levels=5,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=True,
        two_stage_num_proposals=300,
        with_box_refine=True,
        assign_first_stage=True,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        **kwargs,
    ):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"])
        else:
            if isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.pop("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        self.assign_first_stage = assign_first_stage
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
63
0
import functools


def minimum_tickets_cost(days: list[int], costs: list[int]) -> int:
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("The parameter days should be a list of integers")
    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("The parameter costs should be a list of three integers")

    if len(days) == 0:
        return 0
    if min(days) <= 0:
        raise ValueError("All days elements should be greater than 0")
    if max(days) >= 366:
        raise ValueError("All days elements should be less than 366")

    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        if index > 365:
            return 0
        if index not in days_set:
            return dynamic_programming(index + 1)
        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
162
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer

from utils import Seq2SeqDataset, pickle_save


def save_len_file(
    tokenizer_name, data_dir, max_source_length=1024, max_target_length=1024, consider_target=False, **kwargs
):
    """Save max(src_len, tgt_len) for each example to allow dynamic batching."""
    tok = AutoTokenizer.from_pretrained(tokenizer_name)
    train_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="train", **kwargs)
    pad = tok.pad_token_id

    def get_lens(ds):
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn),
            desc=str(ds.len_file),
        )
        max_lens = []
        for batch in dl:
            src_lens = batch["input_ids"].ne(pad).sum(1).tolist()
            tgt_lens = batch["labels"].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens

    train_lens = get_lens(train_ds)
    val_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="val", **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)


if __name__ == "__main__":
    fire.Fire(save_len_file)
63
0
import unittest

from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax

from .test_modeling_common_flax import FlaxModelTesterMixin


if is_flax_available():
    import jax


@require_flax
class FlaxAutoencoderKLTests(FlaxModelTesterMixin, unittest.TestCase):
    model_class = FlaxAutoencoderKL

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        prng_key = jax.random.PRNGKey(0)
        image = jax.random.uniform(prng_key, ((batch_size, num_channels) + sizes))

        return {"sample": image, "prng_key": prng_key}

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
122
'''simple docstring''' import unittest from diffusers.pipelines.pipeline_utils import is_safetensors_compatible class __SCREAMING_SNAKE_CASE (unittest.TestCase ): """simple docstring""" def UpperCamelCase__ ( self : str ): _a = [ "safety_checker/pytorch_model.bin", "safety_checker/model.safetensors", "vae/diffusion_pytorch_model.bin", "vae/diffusion_pytorch_model.safetensors", "text_encoder/pytorch_model.bin", "text_encoder/model.safetensors", "unet/diffusion_pytorch_model.bin", "unet/diffusion_pytorch_model.safetensors", ] self.assertTrue(is_safetensors_compatible(__a ) ) def UpperCamelCase__ ( self : List[str] ): _a = [ "unet/diffusion_pytorch_model.bin", "unet/diffusion_pytorch_model.safetensors", ] self.assertTrue(is_safetensors_compatible(__a ) ) def UpperCamelCase__ ( self : List[str] ): _a = [ "safety_checker/pytorch_model.bin", "safety_checker/model.safetensors", "vae/diffusion_pytorch_model.bin", "vae/diffusion_pytorch_model.safetensors", "text_encoder/pytorch_model.bin", "text_encoder/model.safetensors", "unet/diffusion_pytorch_model.bin", # Removed: 'unet/diffusion_pytorch_model.safetensors', ] self.assertFalse(is_safetensors_compatible(__a ) ) def UpperCamelCase__ ( self : List[str] ): _a = [ "text_encoder/pytorch_model.bin", "text_encoder/model.safetensors", ] self.assertTrue(is_safetensors_compatible(__a ) ) def UpperCamelCase__ ( self : Optional[Any] ): _a = [ "safety_checker/pytorch_model.bin", "safety_checker/model.safetensors", "vae/diffusion_pytorch_model.bin", "vae/diffusion_pytorch_model.safetensors", "text_encoder/pytorch_model.bin", # Removed: 'text_encoder/model.safetensors', "unet/diffusion_pytorch_model.bin", "unet/diffusion_pytorch_model.safetensors", ] self.assertFalse(is_safetensors_compatible(__a ) ) def UpperCamelCase__ ( self : str ): _a = [ "safety_checker/pytorch_model.fp16.bin", "safety_checker/model.fp16.safetensors", "vae/diffusion_pytorch_model.fp16.bin", "vae/diffusion_pytorch_model.fp16.safetensors", "text_encoder/pytorch_model.fp16.bin", "text_encoder/model.fp16.safetensors", "unet/diffusion_pytorch_model.fp16.bin", "unet/diffusion_pytorch_model.fp16.safetensors", ] _a = "fp16" self.assertTrue(is_safetensors_compatible(__a , variant=__a ) ) def UpperCamelCase__ ( self : Any ): _a = [ "unet/diffusion_pytorch_model.fp16.bin", "unet/diffusion_pytorch_model.fp16.safetensors", ] _a = "fp16" self.assertTrue(is_safetensors_compatible(__a , variant=__a ) ) def UpperCamelCase__ ( self : Any ): # pass variant but use the non-variant filenames _a = [ "unet/diffusion_pytorch_model.bin", "unet/diffusion_pytorch_model.safetensors", ] _a = "fp16" self.assertTrue(is_safetensors_compatible(__a , variant=__a ) ) def UpperCamelCase__ ( self : Optional[Any] ): _a = [ "safety_checker/pytorch_model.fp16.bin", "safety_checker/model.fp16.safetensors", "vae/diffusion_pytorch_model.fp16.bin", "vae/diffusion_pytorch_model.fp16.safetensors", "text_encoder/pytorch_model.fp16.bin", "text_encoder/model.fp16.safetensors", "unet/diffusion_pytorch_model.fp16.bin", # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors', ] _a = "fp16" self.assertFalse(is_safetensors_compatible(__a , variant=__a ) ) def UpperCamelCase__ ( self : Dict ): _a = [ "text_encoder/pytorch_model.fp16.bin", "text_encoder/model.fp16.safetensors", ] _a = "fp16" self.assertTrue(is_safetensors_compatible(__a , variant=__a ) ) def UpperCamelCase__ ( self : List[str] ): # pass variant but use the non-variant filenames _a = [ "text_encoder/pytorch_model.bin", "text_encoder/model.safetensors", ] _a = "fp16" 
self.assertTrue(is_safetensors_compatible(__a , variant=__a ) ) def UpperCamelCase__ ( self : Optional[int] ): _a = [ "safety_checker/pytorch_model.fp16.bin", "safety_checker/model.fp16.safetensors", "vae/diffusion_pytorch_model.fp16.bin", "vae/diffusion_pytorch_model.fp16.safetensors", "text_encoder/pytorch_model.fp16.bin", # 'text_encoder/model.fp16.safetensors', "unet/diffusion_pytorch_model.fp16.bin", "unet/diffusion_pytorch_model.fp16.safetensors", ] _a = "fp16" self.assertFalse(is_safetensors_compatible(__a , variant=__a ) )
63
0
from math import factorial


def solution(n: int = 20) -> int:
    n = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1, 2, 3,...
    k = n // 2
    return int(factorial(n) / (factorial(k) * factorial(n - k)))


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        print(solution(20))
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print("Invalid entry - please enter a number.")
240
def base16_encode(data: bytes) -> str:
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(data) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid:\nData does not have an even number of hex digits."
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            "Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters."
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
63
0
"""simple docstring""" import collections.abc from typing import Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention from ...modeling_utils import PreTrainedModel from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_poolformer import PoolFormerConfig _snake_case = logging.get_logger(__name__) # General docstring _snake_case = 'PoolFormerConfig' # Base docstring _snake_case = 'sail/poolformer_s12' _snake_case = [1, 512, 7, 7] # Image classification docstring _snake_case = 'sail/poolformer_s12' _snake_case = 'tabby, tabby cat' _snake_case = [ 'sail/poolformer_s12', # See all PoolFormer models at https://huggingface.co/models?filter=poolformer ] def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ = 0.0 , UpperCamelCase__ = False ): '''simple docstring''' if drop_prob == 0.0 or not training: return input _a : List[Any] = 1 - drop_prob _a : Dict = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets _a : Dict = keep_prob + torch.rand(UpperCamelCase__ , dtype=input.dtype , device=input.device ) random_tensor.floor_() # binarize _a : List[str] = input.div(UpperCamelCase__ ) * random_tensor return output class UpperCamelCase ( nn.Module ): def __init__( self : int , UpperCAmelCase__ : Optional[float] = None ) -> Union[str, Any]: super().__init__() _a : Union[str, Any] = drop_prob def _lowercase ( self : List[str] , UpperCAmelCase__ : torch.Tensor ) -> Any: return drop_path(__a , self.drop_prob , self.training ) def _lowercase ( self : Union[str, Any] ) -> int: return "p={}".format(self.drop_prob ) class UpperCamelCase ( nn.Module ): def __init__( self : Any , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[Any]=None ) -> int: super().__init__() _a : Dict = patch_size if isinstance(__a , collections.abc.Iterable ) else (patch_size, patch_size) _a : int = stride if isinstance(__a , collections.abc.Iterable ) else (stride, stride) _a : str = padding if isinstance(__a , collections.abc.Iterable ) else (padding, padding) _a : int = nn.Convad(__a , __a , kernel_size=__a , stride=__a , padding=__a ) _a : Any = norm_layer(__a ) if norm_layer else nn.Identity() def _lowercase ( self : Union[str, Any] , UpperCAmelCase__ : Optional[Any] ) -> int: _a : Union[str, Any] = self.projection(__a ) _a : str = self.norm(__a ) return embeddings class UpperCamelCase ( nn.GroupNorm ): def __init__( self : Tuple , UpperCAmelCase__ : Dict , **UpperCAmelCase__ : Union[str, Any] ) -> Optional[int]: super().__init__(1 , __a , **__a ) class UpperCamelCase ( nn.Module ): def __init__( self : Union[str, Any] , UpperCAmelCase__ : int ) -> Dict: super().__init__() _a : List[Any] = nn.AvgPoolad(__a , stride=1 , padding=pool_size // 2 , count_include_pad=__a ) def _lowercase ( self : Dict , UpperCAmelCase__ : List[Any] ) -> int: return self.pool(__a ) - hidden_states class UpperCamelCase ( nn.Module ): def __init__( self : int , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : str ) -> int: super().__init__() _a : Tuple = nn.Convad(__a , __a , 1 ) _a : Union[str, Any] = 
nn.Convad(__a , __a , 1 ) _a : Any = PoolFormerDropPath(__a ) if isinstance(config.hidden_act , __a ): _a : Union[str, Any] = ACTaFN[config.hidden_act] else: _a : List[str] = config.hidden_act def _lowercase ( self : Union[str, Any] , UpperCAmelCase__ : str ) -> str: _a : Dict = self.conva(__a ) _a : Tuple = self.act_fn(__a ) _a : List[str] = self.drop(__a ) _a : List[str] = self.conva(__a ) _a : int = self.drop(__a ) return hidden_states class UpperCamelCase ( nn.Module ): def __init__( self : Union[str, Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Dict , UpperCAmelCase__ : int ) -> Tuple: super().__init__() _a : List[Any] = PoolFormerPooling(__a ) _a : str = PoolFormerOutput(__a , __a , __a , __a ) _a : Union[str, Any] = PoolFormerGroupNorm(__a ) _a : Tuple = PoolFormerGroupNorm(__a ) # Useful for training neural nets _a : List[str] = PoolFormerDropPath(__a ) if drop_path > 0.0 else nn.Identity() _a : Optional[int] = config.use_layer_scale if config.use_layer_scale: _a : int = nn.Parameter( config.layer_scale_init_value * torch.ones((__a) ) , requires_grad=__a ) _a : Dict = nn.Parameter( config.layer_scale_init_value * torch.ones((__a) ) , requires_grad=__a ) def _lowercase ( self : int , UpperCAmelCase__ : Union[str, Any] ) -> Optional[Any]: if self.use_layer_scale: _a : Optional[Any] = self.pooling(self.before_norm(__a ) ) _a : Any = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * pooling_output # First residual connection _a : Tuple = hidden_states + self.drop_path(__a ) _a : Any = () _a : Any = self.output(self.after_norm(__a ) ) _a : int = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * layer_output # Second residual connection _a : Union[str, Any] = hidden_states + self.drop_path(__a ) _a : Union[str, Any] = (output,) + outputs return outputs else: _a : Optional[Any] = self.drop_path(self.pooling(self.before_norm(__a ) ) ) # First residual connection _a : int = pooling_output + hidden_states _a : Tuple = () # Second residual connection inside the PoolFormerOutput block _a : List[Any] = self.drop_path(self.output(self.after_norm(__a ) ) ) _a : Any = hidden_states + layer_output _a : Dict = (output,) + outputs return outputs class UpperCamelCase ( nn.Module ): def __init__( self : Optional[Any] , UpperCAmelCase__ : Optional[int] ) -> Optional[int]: super().__init__() _a : Union[str, Any] = config # stochastic depth decay rule _a : Tuple = [x.item() for x in torch.linspace(0 , config.drop_path_rate , sum(config.depths ) )] # patch embeddings _a : int = [] for i in range(config.num_encoder_blocks ): embeddings.append( PoolFormerEmbeddings( patch_size=config.patch_sizes[i] , stride=config.strides[i] , padding=config.padding[i] , num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] , hidden_size=config.hidden_sizes[i] , ) ) _a : Union[str, Any] = nn.ModuleList(__a ) # Transformer blocks _a : str = [] _a : Optional[Any] = 0 for i in range(config.num_encoder_blocks ): # each block consists of layers _a : Optional[Any] = [] if i != 0: cur += config.depths[i - 1] for j in range(config.depths[i] ): layers.append( PoolFormerLayer( __a , num_channels=config.hidden_sizes[i] , pool_size=config.pool_size , hidden_size=config.hidden_sizes[i] , intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio ) , drop_path=dpr[cur + j] , ) ) blocks.append(nn.ModuleList(__a ) ) _a : str = nn.ModuleList(__a ) def _lowercase ( self : Union[str, Any] , UpperCAmelCase__ : Any , 
UpperCAmelCase__ : List[Any]=False , UpperCAmelCase__ : Union[str, Any]=True ) -> Optional[int]: _a : Dict = () if output_hidden_states else None _a : Dict = pixel_values for idx, layers in enumerate(zip(self.patch_embeddings , self.block ) ): _a , _a : Dict = layers # Get patch embeddings from hidden_states _a : List[str] = embedding_layer(__a ) # Send the embeddings through the blocks for _, blk in enumerate(__a ): _a : List[Any] = blk(__a ) _a : List[Any] = layer_outputs[0] if output_hidden_states: _a : int = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states] if v is not None ) return BaseModelOutputWithNoAttention(last_hidden_state=__a , hidden_states=__a ) class UpperCamelCase ( lowerCamelCase_ ): UpperCamelCase : Union[str, Any] = PoolFormerConfig UpperCamelCase : Tuple = '''poolformer''' UpperCamelCase : List[str] = '''pixel_values''' UpperCamelCase : Tuple = True def _lowercase ( self : List[Any] , UpperCAmelCase__ : Union[str, Any] ) -> Tuple: if isinstance(__a , (nn.Linear, nn.Convad) ): module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range ) if module.bias is not None: module.bias.data.zero_() elif isinstance(__a , nn.LayerNorm ): module.bias.data.zero_() module.weight.data.fill_(1.0 ) def _lowercase ( self : Dict , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[int]=False ) -> str: if isinstance(__a , __a ): _a : Tuple = value _snake_case = R'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' _snake_case = R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. 
See\n [`PoolFormerImageProcessor.__call__`] for details.\n' @add_start_docstrings( '''The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.''' , lowerCamelCase_ , ) class UpperCamelCase ( lowerCamelCase_ ): def __init__( self : str , UpperCAmelCase__ : Optional[Any] ) -> Tuple: super().__init__(__a ) _a : Dict = config _a : List[Any] = PoolFormerEncoder(__a ) # Initialize weights and apply final processing self.post_init() def _lowercase ( self : str ) -> str: return self.embeddings.patch_embeddings @add_start_docstrings_to_model_forward(__a ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=__a , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def _lowercase ( self : Union[str, Any] , UpperCAmelCase__ : Optional[torch.FloatTensor] = None , UpperCAmelCase__ : Optional[bool] = None , UpperCAmelCase__ : Optional[bool] = None , ) -> Any: _a : int = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) _a : int = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError("""You have to specify pixel_values""" ) _a : Dict = self.encoder( __a , output_hidden_states=__a , return_dict=__a , ) _a : Any = encoder_outputs[0] if not return_dict: return (sequence_output, None) + encoder_outputs[1:] return BaseModelOutputWithNoAttention( last_hidden_state=__a , hidden_states=encoder_outputs.hidden_states , ) class UpperCamelCase ( nn.Module ): def __init__( self : Any , UpperCAmelCase__ : int ) -> Optional[int]: super().__init__() _a : Any = nn.Linear(config.hidden_size , config.hidden_size ) def _lowercase ( self : Optional[int] , UpperCAmelCase__ : Optional[int] ) -> Any: _a : List[str] = self.dense(__a ) return output @add_start_docstrings( '''\n PoolFormer Model transformer with an image classification head on top\n ''' , lowerCamelCase_ , ) class UpperCamelCase ( lowerCamelCase_ ): def __init__( self : Dict , UpperCAmelCase__ : Optional[Any] ) -> List[Any]: super().__init__(__a ) _a : Dict = config.num_labels _a : Optional[Any] = PoolFormerModel(__a ) # Final norm _a : List[str] = PoolFormerGroupNorm(config.hidden_sizes[-1] ) # Classifier head _a : List[str] = ( nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(__a ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__a , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def _lowercase ( self : Any , UpperCAmelCase__ : Optional[torch.FloatTensor] = None , UpperCAmelCase__ : Optional[torch.LongTensor] = None , UpperCAmelCase__ : Optional[bool] = None , UpperCAmelCase__ : Optional[bool] = None , ) -> Any: _a : Optional[Any] = return_dict if return_dict is not None else self.config.use_return_dict _a : List[Any] = self.poolformer( __a , output_hidden_states=__a , return_dict=__a , ) _a : List[Any] = outputs[0] _a : Optional[int] = self.classifier(self.norm(__a ).mean([-2, -1] ) ) _a : Optional[int] = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: _a : Any = """regression""" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): _a : Optional[Any] = """single_label_classification""" else: _a : List[Any] = """multi_label_classification""" if 
self.config.problem_type == "regression": _a : List[str] = MSELoss() if self.num_labels == 1: _a : str = loss_fct(logits.squeeze() , labels.squeeze() ) else: _a : List[str] = loss_fct(__a , __a ) elif self.config.problem_type == "single_label_classification": _a : List[str] = CrossEntropyLoss() _a : List[str] = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": _a : Optional[int] = BCEWithLogitsLoss() _a : Optional[Any] = loss_fct(__a , __a ) if not return_dict: _a : Tuple = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return ImageClassifierOutputWithNoAttention(loss=__a , logits=__a , hidden_states=outputs.hidden_states )
294
'''simple docstring''' from copy import deepcopy import torch import torch.nn.functional as F from torch.optim import AdamW from torch.optim.lr_scheduler import LambdaLR from torch.utils.data import DataLoader from accelerate.accelerator import Accelerator from accelerate.state import GradientState from accelerate.test_utils import RegressionDataset, RegressionModel from accelerate.utils import DistributedType, is_torch_version, set_seed def _lowerCamelCase ( lowercase : Optional[Any] , lowercase : Optional[int] , lowercase : Optional[Any] , lowercase : Dict ) -> str: for param, grad_param in zip(model_a.parameters() , model_b.parameters() ): if not param.requires_grad: continue if not did_step: # Grads should not be in sync assert ( torch.allclose(param.grad , grad_param.grad ) is False ), F'Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})' else: # Grads should be in sync assert ( torch.allclose(param.grad , grad_param.grad ) is True ), F'Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})' def _lowerCamelCase ( lowercase : Optional[Any] , lowercase : int , lowercase : Tuple , lowercase : Optional[int] , lowercase : int=True ) -> Any: model.train() _a = model(lowercase ) _a = F.mse_loss(lowercase , target.to(output.device ) ) if not do_backward: loss /= accelerator.gradient_accumulation_steps loss.backward() else: accelerator.backward(lowercase ) def _lowerCamelCase ( lowercase : int , lowercase : Tuple=False ) -> List[str]: set_seed(42 ) _a = RegressionModel() _a = deepcopy(lowercase ) _a = RegressionDataset(length=80 ) _a = DataLoader(lowercase , batch_size=16 ) model.to(accelerator.device ) if sched: _a = AdamW(params=model.parameters() , lr=1E-3 ) _a = AdamW(params=ddp_model.parameters() , lr=1E-3 ) _a = LambdaLR(lowercase , lr_lambda=lambda lowercase : epoch**0.65 ) _a = LambdaLR(lowercase , lr_lambda=lambda lowercase : epoch**0.65 ) # Make a copy of `model` if sched: _a , _a , _a , _a = accelerator.prepare(lowercase , lowercase , lowercase , lowercase ) else: _a , _a = accelerator.prepare(lowercase , lowercase ) if sched: return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched) return model, ddp_model, dataloader def _lowerCamelCase ( lowercase : Optional[Any] ) -> Optional[int]: # Test when on a single CPU or GPU that the context manager does nothing _a , _a , _a = get_training_setup(lowercase ) # Use a single batch _a , _a = next(iter(lowercase ) ).values() for iteration in range(3 ): # Gather the distributed inputs and targs for the base model _a , _a = accelerator.gather((ddp_input, ddp_target) ) _a , _a = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(lowercase , lowercase , lowercase , lowercase ) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(lowercase ): step_model(lowercase , lowercase , lowercase , lowercase ) else: # Sync grads step_model(lowercase , lowercase , lowercase , lowercase ) # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync check_model_parameters(lowercase , lowercase , lowercase , lowercase ) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue assert torch.allclose( param.grad , ddp_param.grad ), F'Gradients not in sync when they should be:\nModel grad 
({param.grad}) != DDP grad ({ddp_param.grad})' # Shuffle ddp_input on each iteration torch.manual_seed(1337 + iteration ) _a = ddp_input[torch.randperm(len(lowercase ) )] def _lowerCamelCase ( lowercase : Tuple ) -> Tuple: # Test on distributed setup that context manager behaves properly _a , _a , _a = get_training_setup(lowercase ) # Use a single batch _a , _a = next(iter(lowercase ) ).values() for iteration in range(3 ): # Gather the distributed inputs and targs for the base model _a , _a = accelerator.gather((ddp_input, ddp_target) ) _a , _a = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(lowercase , lowercase , lowercase , lowercase ) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(lowercase ): step_model(lowercase , lowercase , lowercase , lowercase ) else: # Sync grads step_model(lowercase , lowercase , lowercase , lowercase ) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue if iteration % 2 == 0: # Grads should not be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is False ), F'Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})' else: # Grads should be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is True ), F'Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})' # Shuffle ddp_input on each iteration torch.manual_seed(1337 + iteration ) _a = ddp_input[torch.randperm(len(lowercase ) )] def _lowerCamelCase ( lowercase : List[Any]=False , lowercase : Optional[int]=False ) -> Any: _a = Accelerator( split_batches=lowercase , dispatch_batches=lowercase , gradient_accumulation_steps=2 ) # Test that context manager behaves properly _a , _a , _a = get_training_setup(lowercase ) for iteration, batch in enumerate(lowercase ): _a , _a = batch.values() # Gather the distributed inputs and targs for the base model _a , _a = accelerator.gather((ddp_input, ddp_target) ) _a , _a = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(lowercase , lowercase , lowercase , lowercase , lowercase ) # Do "gradient accumulation" (noop) with accelerator.accumulate(lowercase ): step_model(lowercase , lowercase , lowercase , lowercase ) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue if ((iteration + 1) % 2 == 0) or (iteration == len(lowercase ) - 1): # Grads should be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is True ), F'Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})' else: # Grads should not be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is False ), F'Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})' # Shuffle ddp_input on each iteration torch.manual_seed(1337 + iteration ) _a = ddp_input[torch.randperm(len(lowercase ) )] GradientState._reset_state() def _lowerCamelCase ( lowercase : int=False , lowercase : int=False ) -> Dict: _a = Accelerator( split_batches=lowercase , dispatch_batches=lowercase , 
gradient_accumulation_steps=2 ) # Test that context manager behaves properly _a , _a , _a , _a , _a , _a , _a = get_training_setup(lowercase , lowercase ) for iteration, batch in enumerate(lowercase ): _a , _a = batch.values() # Gather the distributed inputs and targs for the base model _a , _a = accelerator.gather((ddp_input, ddp_target) ) _a , _a = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" model.train() ddp_model.train() step_model(lowercase , lowercase , lowercase , lowercase , lowercase ) opt.step() if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(lowercase )): if split_batches: sched.step() else: for _ in range(accelerator.num_processes ): sched.step() opt.zero_grad() # Perform gradient accumulation under wrapper with accelerator.accumulate(lowercase ): step_model(lowercase , lowercase , lowercase , lowercase ) ddp_opt.step() ddp_sched.step() ddp_opt.zero_grad() # Learning rates should be the same assert ( opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"] ), F'Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n' _a = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(lowercase )) if accelerator.num_processes > 1: check_model_parameters(lowercase , lowercase , lowercase , lowercase ) # Shuffle ddp_input on each iteration torch.manual_seed(1337 + iteration ) GradientState._reset_state() def _lowerCamelCase ( ) -> Any: _a = Accelerator() _a = RegressionDataset(length=80 ) _a = DataLoader(lowercase , batch_size=16 ) _a = RegressionDataset(length=96 ) _a = DataLoader(lowercase , batch_size=16 ) _a , _a = accelerator.prepare(lowercase , lowercase ) assert accelerator.gradient_state.active_dataloader is None for iteration, _ in enumerate(lowercase ): assert id(accelerator.gradient_state.active_dataloader ) == id(lowercase ) if iteration < len(lowercase ) - 1: assert not accelerator.gradient_state.end_of_dataloader if iteration == 1: for batch_num, _ in enumerate(lowercase ): assert id(accelerator.gradient_state.active_dataloader ) == id(lowercase ) if batch_num < len(lowercase ) - 1: assert not accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader assert accelerator.gradient_state.active_dataloader is None def _lowerCamelCase ( ) -> Optional[Any]: _a = Accelerator() _a = accelerator.state if state.local_process_index == 0: print("**Test `accumulate` gradient accumulation with dataloader break**" ) test_dataloader_break() if state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print("**Test NOOP `no_sync` context manager**" ) test_noop_sync(lowercase ) if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU): if state.local_process_index == 0: print("**Test Distributed `no_sync` context manager**" ) test_distributed_sync(lowercase ) if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if state.local_process_index == 0: print( "**Test `accumulate` gradient accumulation, " , F'`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**' , ) test_gradient_accumulation(lowercase , lowercase ) # Currently will break on torch 2.0 +, need to investigate why if is_torch_version("<" , "2.0" ) or state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print( 
"**Test `accumulate` gradient accumulation with optimizer and scheduler, " , "`split_batches=False`, `dispatch_batches=False`**" , ) test_gradient_accumulation_with_opt_and_scheduler() if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if not split_batch and not dispatch_batches: continue if state.local_process_index == 0: print( "**Test `accumulate` gradient accumulation with optimizer and scheduler, " , F'`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**' , ) test_gradient_accumulation_with_opt_and_scheduler(lowercase , lowercase ) def _lowerCamelCase ( lowercase : Any ) -> Tuple: # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
63
0
import requests
from bs4 import BeautifulSoup


def world_covid19_stats(url: str = "https://www.worldometers.info/coronavirus") -> dict:
    """Scrape the key/value statistic pairs from the Worldometers COVID-19 page."""
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    keys = soup.findAll("h1")
    values = soup.findAll("div", {"class": "maincounter-number"})
    keys += soup.findAll("span", {"class": "panel-title"})
    values += soup.findAll("div", {"class": "number-table-main"})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}


if __name__ == "__main__":
    print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
    for key, value in world_covid19_stats().items():
        print(f"{key}\n{value}\n")
68
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/trocr-base-handwritten": (
        "https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"
    ),
    # See all TrOCR models at https://huggingface.co/models?filter=trocr
}


class TrOCRConfig(PretrainedConfig):
    model_type = "trocr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "decoder_attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "decoder_layers",
    }

    def __init__(
        self,
        vocab_size=50265,
        d_model=1024,
        decoder_layers=12,
        decoder_attention_heads=16,
        decoder_ffn_dim=4096,
        activation_function="gelu",
        max_position_embeddings=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        decoder_start_token_id=2,
        init_std=0.02,
        decoder_layerdrop=0.0,
        use_cache=True,
        scale_embedding=False,
        use_learned_position_embeddings=True,
        layernorm_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
63
0
def reverse_words(input_str: str) -> str:
    """Reverse the order of the words in `input_str`."""
    return " ".join(input_str.split()[::-1])


if __name__ == "__main__":
    import doctest

    doctest.testmod()
157
import argparse
import os
import re


PATH_TO_AUTO_MODULE = "src/transformers/models/auto"

# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
_re_intro_mapping = re.compile(r"[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict")
# re pattern that matches identifiers in mappings
_re_identifier = re.compile(r'\s*\(\s*"(\S[^"]+)"')


def sort_auto_mapping(fname, overwrite: bool = False):
    with open(fname, "r", encoding="utf-8") as f:
        content = f.read()

    lines = content.split("\n")
    new_lines = []
    line_idx = 0
    while line_idx < len(lines):
        if _re_intro_mapping.search(lines[line_idx]) is not None:
            indent = len(re.search(r"^(\s*)\S", lines[line_idx]).groups()[0]) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(" " * indent + "("):
                new_lines.append(lines[line_idx])
                line_idx += 1

            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(" " * indent + ")"):
                        line_idx += 1
                    blocks.append("\n".join(lines[start_idx : line_idx + 1]))
                else:
                    blocks.append(lines[line_idx])
                    line_idx += 1

            # Sort blocks by their identifiers
            blocks = sorted(blocks, key=lambda x: _re_identifier.search(x).groups()[0])
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx])
            line_idx += 1

    if overwrite:
        with open(fname, "w", encoding="utf-8") as f:
            f.write("\n".join(new_lines))
    elif "\n".join(new_lines) != content:
        return True


def sort_all_auto_mappings(overwrite: bool = False):
    fnames = [os.path.join(PATH_TO_AUTO_MODULE, f) for f in os.listdir(PATH_TO_AUTO_MODULE) if f.endswith(".py")]
    diffs = [sort_auto_mapping(fname, overwrite=overwrite) for fname in fnames]

    if not overwrite and any(diffs):
        failures = [f for f, d in zip(fnames, diffs) if d]
        raise ValueError(
            f"The following files have auto mappings that need sorting: {', '.join(failures)}. Run `make style` to fix"
            " this."
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_all_auto_mappings(not args.check_only)
63
0
'''simple docstring''' import unittest import numpy as np import timeout_decorator # noqa from transformers import BlenderbotConfig, is_flax_available from transformers.testing_utils import jax_device, require_flax, slow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html _lowerCAmelCase = 'platform' import jax import jax.numpy as jnp from transformers import BlenderbotTokenizer from transformers.models.blenderbot.modeling_flax_blenderbot import ( FlaxBlenderbotForConditionalGeneration, FlaxBlenderbotModel, shift_tokens_right, ) def __lowerCAmelCase ( snake_case__ , snake_case__ , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__=None , ): if attention_mask is None: __UpperCamelCase : Any = np.where(input_ids != config.pad_token_id , 1 , 0 ) if decoder_attention_mask is None: __UpperCamelCase : int = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 ) if head_mask is None: __UpperCamelCase : List[str] = np.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: __UpperCamelCase : List[str] = np.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: __UpperCamelCase : Tuple = np.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": attention_mask, } class A : '''simple docstring''' def __init__(self , _UpperCAmelCase , _UpperCAmelCase=1_3 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=9_9 , _UpperCAmelCase=1_6 , _UpperCAmelCase=2 , _UpperCAmelCase=4 , _UpperCAmelCase=4 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=3_2 , _UpperCAmelCase=2 , _UpperCAmelCase=1 , _UpperCAmelCase=0 , _UpperCAmelCase=0.02 , ) -> int: __UpperCamelCase : Dict = parent __UpperCamelCase : Tuple = batch_size __UpperCamelCase : List[Any] = seq_length __UpperCamelCase : Union[str, Any] = is_training __UpperCamelCase : Dict = use_labels __UpperCamelCase : Optional[int] = vocab_size __UpperCamelCase : Optional[Any] = hidden_size __UpperCamelCase : Any = num_hidden_layers __UpperCamelCase : List[str] = num_attention_heads __UpperCamelCase : Optional[Any] = intermediate_size __UpperCamelCase : int = hidden_act __UpperCamelCase : Any = hidden_dropout_prob __UpperCamelCase : List[Any] = attention_probs_dropout_prob __UpperCamelCase : Dict = max_position_embeddings __UpperCamelCase : Dict = eos_token_id __UpperCamelCase : Union[str, Any] = pad_token_id __UpperCamelCase : Optional[Any] = bos_token_id __UpperCamelCase : Tuple = initializer_range def a_ (self ) -> Optional[int]: __UpperCamelCase : Optional[int] = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size ) __UpperCamelCase : Dict = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 ) __UpperCamelCase : str = shift_tokens_right(__a , 1 , 2 ) __UpperCamelCase : List[Any] = BlenderbotConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , 
encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=__a , ) __UpperCamelCase : Tuple = prepare_blenderbot_inputs_dict(__a , __a , __a ) return config, inputs_dict def a_ (self ) -> List[Any]: __UpperCamelCase , __UpperCamelCase : Tuple = self.prepare_config_and_inputs() return config, inputs_dict def a_ (self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> int: __UpperCamelCase : List[Any] = 2_0 __UpperCamelCase : Optional[int] = model_class_name(__a ) __UpperCamelCase : Optional[int] = model.encode(inputs_dict["input_ids"] ) __UpperCamelCase , __UpperCamelCase : Any = ( inputs_dict["decoder_input_ids"], inputs_dict["decoder_attention_mask"], ) __UpperCamelCase : Optional[int] = model.init_cache(decoder_input_ids.shape[0] , __a , __a ) __UpperCamelCase : int = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="i4" ) __UpperCamelCase : List[str] = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) __UpperCamelCase : List[str] = model.decode( decoder_input_ids[:, :-1] , __a , decoder_attention_mask=__a , past_key_values=__a , decoder_position_ids=__a , ) __UpperCamelCase : Dict = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" ) __UpperCamelCase : Optional[Any] = model.decode( decoder_input_ids[:, -1:] , __a , decoder_attention_mask=__a , past_key_values=outputs_cache.past_key_values , decoder_position_ids=__a , ) __UpperCamelCase : List[Any] = model.decode(__a , __a ) __UpperCamelCase : List[Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=f"Max diff is {diff}" ) def a_ (self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Union[str, Any]: __UpperCamelCase : Union[str, Any] = 2_0 __UpperCamelCase : List[str] = model_class_name(__a ) __UpperCamelCase : List[Any] = model.encode(inputs_dict["input_ids"] ) __UpperCamelCase , __UpperCamelCase : str = ( inputs_dict["decoder_input_ids"], inputs_dict["decoder_attention_mask"], ) __UpperCamelCase : str = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ), ] , axis=-1 , ) __UpperCamelCase : str = model.init_cache(decoder_input_ids.shape[0] , __a , __a ) __UpperCamelCase : Any = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) __UpperCamelCase : List[Any] = model.decode( decoder_input_ids[:, :-1] , __a , decoder_attention_mask=__a , past_key_values=__a , decoder_position_ids=__a , ) __UpperCamelCase : Tuple = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" ) __UpperCamelCase : List[Any] = model.decode( decoder_input_ids[:, -1:] , __a , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=__a , decoder_position_ids=__a , ) __UpperCamelCase : List[Any] = model.decode(__a , __a , 
decoder_attention_mask=__a ) __UpperCamelCase : List[Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=f"Max diff is {diff}" ) @require_flax class A ( unittest.TestCase ): '''simple docstring''' A = 9_9 def a_ (self ) -> Dict: __UpperCamelCase : Union[str, Any] = np.array( [ [7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2], [6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2], [5, 9_7, 1_7, 3_9, 9_4, 4_0, 2], [7_6, 8_3, 9_4, 2_5, 7_0, 7_8, 2], [8_7, 5_9, 4_1, 3_5, 4_8, 6_6, 2], [5_5, 1_3, 1_6, 5_8, 5, 2, 1], # note padding [6_4, 2_7, 3_1, 5_1, 1_2, 7_5, 2], [5_2, 6_4, 8_6, 1_7, 8_3, 3_9, 2], [4_8, 6_1, 9, 2_4, 7_1, 8_2, 2], [2_6, 1, 6_0, 4_8, 2_2, 1_3, 2], [2_1, 5, 6_2, 2_8, 1_4, 7_6, 2], [4_5, 9_8, 3_7, 8_6, 5_9, 4_8, 2], [7_0, 7_0, 5_0, 9, 2_8, 0, 2], ] , dtype=np.intaa , ) __UpperCamelCase : Optional[Any] = input_ids.shape[0] __UpperCamelCase : Any = BlenderbotConfig( vocab_size=self.vocab_size , d_model=2_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=3_2 , decoder_ffn_dim=3_2 , max_position_embeddings=4_8 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ) return config, input_ids, batch_size def a_ (self ) -> List[Any]: __UpperCamelCase , __UpperCamelCase , __UpperCamelCase : Dict = self._get_config_and_data() __UpperCamelCase : List[Any] = FlaxBlenderbotForConditionalGeneration(__a ) __UpperCamelCase : str = lm_model(input_ids=__a ) __UpperCamelCase : Any = (batch_size, input_ids.shape[1], config.vocab_size) self.assertEqual(outputs["logits"].shape , __a ) def a_ (self ) -> Optional[Any]: __UpperCamelCase : Optional[Any] = BlenderbotConfig( vocab_size=self.vocab_size , d_model=1_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=4_8 , ) __UpperCamelCase : List[str] = FlaxBlenderbotForConditionalGeneration(__a ) __UpperCamelCase : str = np.array([[7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2], [6_8, 3_4, 2_6, 5_8, 3_0, 2, 1]] , dtype=np.intaa ) __UpperCamelCase : Optional[int] = np.array([[8_2, 7_1, 8_2, 1_8, 2], [5_8, 6_8, 2, 1, 1]] , dtype=np.intaa ) __UpperCamelCase : Optional[Any] = lm_model(input_ids=__a , decoder_input_ids=__a ) __UpperCamelCase : Any = (*summary.shape, config.vocab_size) self.assertEqual(outputs["logits"].shape , __a ) def a_ (self ) -> Union[str, Any]: __UpperCamelCase : str = np.array([[7_1, 8_2, 1_8, 3_3, 2, 1, 1], [6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2]] , dtype=np.intaa ) __UpperCamelCase : Union[str, Any] = shift_tokens_right(__a , 1 , 2 ) __UpperCamelCase : List[Any] = np.equal(__a , 1 ).astype(np.floataa ).sum() __UpperCamelCase : int = np.equal(__a , 1 ).astype(np.floataa ).sum() self.assertEqual(shifted.shape , input_ids.shape ) self.assertEqual(__a , n_pad_before - 1 ) self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() ) @require_flax class A ( lowerCamelCase_ , unittest.TestCase , lowerCamelCase_ ): '''simple docstring''' A = True A = ( ( FlaxBlenderbotModel, FlaxBlenderbotForConditionalGeneration, ) if is_flax_available() else () ) A = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else () def a_ (self ) -> int: __UpperCamelCase : Optional[int] = FlaxBlenderbotModelTester(self ) def a_ (self ) -> List[Any]: __UpperCamelCase , __UpperCamelCase : int = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(__a , __a , __a ) def a_ (self ) -> int: 
__UpperCamelCase , __UpperCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(__a , __a , __a ) def a_ (self ) -> Optional[Any]: __UpperCamelCase , __UpperCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): __UpperCamelCase : List[Any] = self._prepare_for_class(__a , __a ) __UpperCamelCase : Union[str, Any] = model_class(__a ) @jax.jit def encode_jitted(_UpperCAmelCase , _UpperCAmelCase=None , **_UpperCAmelCase ): return model.encode(input_ids=__a , attention_mask=__a ) with self.subTest("JIT Enabled" ): __UpperCamelCase : Tuple = encode_jitted(**__a ).to_tuple() with self.subTest("JIT Disabled" ): with jax.disable_jit(): __UpperCamelCase : str = encode_jitted(**__a ).to_tuple() self.assertEqual(len(__a ) , len(__a ) ) for jitted_output, output in zip(__a , __a ): self.assertEqual(jitted_output.shape , output.shape ) def a_ (self ) -> Optional[int]: __UpperCamelCase , __UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): __UpperCamelCase : Optional[Any] = model_class(__a ) __UpperCamelCase : Any = model.encode(inputs_dict["input_ids"] , inputs_dict["attention_mask"] ) __UpperCamelCase : Tuple = { "decoder_input_ids": inputs_dict["decoder_input_ids"], "decoder_attention_mask": inputs_dict["decoder_attention_mask"], "encoder_outputs": encoder_outputs, } @jax.jit def decode_jitted(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): return model.decode( decoder_input_ids=__a , decoder_attention_mask=__a , encoder_outputs=__a , ) with self.subTest("JIT Enabled" ): __UpperCamelCase : Any = decode_jitted(**__a ).to_tuple() with self.subTest("JIT Disabled" ): with jax.disable_jit(): __UpperCamelCase : List[str] = decode_jitted(**__a ).to_tuple() self.assertEqual(len(__a ) , len(__a ) ) for jitted_output, output in zip(__a , __a ): self.assertEqual(jitted_output.shape , output.shape ) @slow def a_ (self ) -> Optional[Any]: for model_class_name in self.all_model_classes: __UpperCamelCase : Any = model_class_name.from_pretrained("facebook/blenderbot-400M-distill" ) # FlaxBlenderbotForSequenceClassification expects eos token in input_ids __UpperCamelCase : List[str] = np.ones((1, 1) ) * model.config.eos_token_id __UpperCamelCase : Optional[int] = model(__a ) self.assertIsNotNone(__a ) @unittest.skipUnless(jax_device != "cpu" , "3B test too slow on CPU." ) @slow def a_ (self ) -> Optional[Any]: __UpperCamelCase : Dict = {"num_beams": 1, "early_stopping": True, "min_length": 1_5, "max_length": 2_5} __UpperCamelCase : List[Any] = {"skip_special_tokens": True, "clean_up_tokenization_spaces": True} __UpperCamelCase : str = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-3B" , from_pt=__a ) __UpperCamelCase : List[Any] = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B" ) __UpperCamelCase : Dict = ["Sam"] __UpperCamelCase : Optional[Any] = tokenizer(__a , return_tensors="jax" ) __UpperCamelCase : Any = model.generate(**__a , **__a ) __UpperCamelCase : List[str] = "Sam is a great name. It means \"sun\" in Gaelic." __UpperCamelCase : Union[str, Any] = tokenizer.batch_decode(__a , **__a ) assert generated_txt[0].strip() == tgt_text
298
'''simple docstring''' from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCAmelCase_ : int = logging.get_logger(__name__) lowerCAmelCase_ : Tuple = { 'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json', 'google/bigbird-roberta-large': 'https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json', 'google/bigbird-base-trivia-itc': 'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json', # See all BigBird models at https://huggingface.co/models?filter=big_bird } class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ): """simple docstring""" __a ='big_bird' def __init__( self : Optional[int] , __a : Dict=5_03_58 , __a : str=7_68 , __a : List[Any]=12 , __a : List[str]=12 , __a : Union[str, Any]=30_72 , __a : str="gelu_new" , __a : Dict=0.1 , __a : Union[str, Any]=0.1 , __a : Any=40_96 , __a : int=2 , __a : Tuple=0.02 , __a : List[Any]=1e-1_2 , __a : int=True , __a : List[str]=0 , __a : Tuple=1 , __a : Optional[Any]=2 , __a : Tuple=66 , __a : str="block_sparse" , __a : Tuple=True , __a : Optional[int]=False , __a : str=64 , __a : Tuple=3 , __a : Any=None , **__a : Dict , ): super().__init__( pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , sep_token_id=__a , **__a , ) _a = vocab_size _a = max_position_embeddings _a = hidden_size _a = num_hidden_layers _a = num_attention_heads _a = intermediate_size _a = hidden_act _a = hidden_dropout_prob _a = attention_probs_dropout_prob _a = initializer_range _a = type_vocab_size _a = layer_norm_eps _a = use_cache _a = rescale_embeddings _a = attention_type _a = use_bias _a = block_size _a = num_random_blocks _a = classifier_dropout class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ): """simple docstring""" @property def UpperCamelCase__ ( self : Optional[int] ): if self.task == "multiple-choice": _a = {0: "batch", 1: "choice", 2: "sequence"} else: _a = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ] )
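# The config class above mirrors transformers' BigBirdConfig (the class and
# argument names are mangled in this record). A minimal usage sketch against
# the real library, showing the sparse-attention knobs:
from transformers import BigBirdConfig

config = BigBirdConfig(attention_type="block_sparse", block_size=64, num_random_blocks=3)
print(config.max_position_embeddings)  # 4096 by default

# Full (quadratic) attention is the other supported mode:
config.attention_type = "original_full"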
63
0
import argparse import copy def _UpperCamelCase ( lowercase__ ): __SCREAMING_SNAKE_CASE : Any = {} with open(lowercase__ ) as f: for line in f: if line.split()[0] not in dict_of_neighbours: __SCREAMING_SNAKE_CASE : int = [] _list.append([line.split()[1], line.split()[2]] ) __SCREAMING_SNAKE_CASE : Dict = _list else: dict_of_neighbours[line.split()[0]].append( [line.split()[1], line.split()[2]] ) if line.split()[1] not in dict_of_neighbours: __SCREAMING_SNAKE_CASE : List[str] = [] _list.append([line.split()[0], line.split()[2]] ) __SCREAMING_SNAKE_CASE : Dict = _list else: dict_of_neighbours[line.split()[1]].append( [line.split()[0], line.split()[2]] ) return dict_of_neighbours def _UpperCamelCase ( lowercase__ , lowercase__ ): with open(lowercase__ ) as f: __SCREAMING_SNAKE_CASE : Optional[Any] = f.read(1 ) __SCREAMING_SNAKE_CASE : str = start_node __SCREAMING_SNAKE_CASE : Optional[Any] = [] __SCREAMING_SNAKE_CASE : str = start_node __SCREAMING_SNAKE_CASE : Union[str, Any] = 0 while visiting not in first_solution: __SCREAMING_SNAKE_CASE : str = 10000 for k in dict_of_neighbours[visiting]: if int(k[1] ) < int(lowercase__ ) and k[0] not in first_solution: __SCREAMING_SNAKE_CASE : List[str] = k[1] __SCREAMING_SNAKE_CASE : Dict = k[0] first_solution.append(lowercase__ ) __SCREAMING_SNAKE_CASE : int = distance_of_first_solution + int(lowercase__ ) __SCREAMING_SNAKE_CASE : Optional[int] = best_node first_solution.append(lowercase__ ) __SCREAMING_SNAKE_CASE : Optional[int] = 0 for k in dict_of_neighbours[first_solution[-2]]: if k[0] == start_node: break position += 1 __SCREAMING_SNAKE_CASE : Dict = ( distance_of_first_solution + int(dict_of_neighbours[first_solution[-2]][position][1] ) - 10000 ) return first_solution, distance_of_first_solution def _UpperCamelCase ( lowercase__ , lowercase__ ): __SCREAMING_SNAKE_CASE : int = [] for n in solution[1:-1]: __SCREAMING_SNAKE_CASE : List[Any] = solution.index(lowercase__ ) for kn in solution[1:-1]: __SCREAMING_SNAKE_CASE : Tuple = solution.index(lowercase__ ) if n == kn: continue __SCREAMING_SNAKE_CASE : Any = copy.deepcopy(lowercase__ ) __SCREAMING_SNAKE_CASE : List[Any] = kn __SCREAMING_SNAKE_CASE : str = n __SCREAMING_SNAKE_CASE : Dict = 0 for k in _tmp[:-1]: __SCREAMING_SNAKE_CASE : Union[str, Any] = _tmp[_tmp.index(lowercase__ ) + 1] for i in dict_of_neighbours[k]: if i[0] == next_node: __SCREAMING_SNAKE_CASE : Tuple = distance + int(i[1] ) _tmp.append(lowercase__ ) if _tmp not in neighborhood_of_solution: neighborhood_of_solution.append(_tmp ) __SCREAMING_SNAKE_CASE : int = len(neighborhood_of_solution[0] ) - 1 neighborhood_of_solution.sort(key=lambda lowercase__ : x[index_of_last_item_in_the_list] ) return neighborhood_of_solution def _UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ): __SCREAMING_SNAKE_CASE : Dict = 1 __SCREAMING_SNAKE_CASE : List[Any] = first_solution __SCREAMING_SNAKE_CASE : Optional[Any] = [] __SCREAMING_SNAKE_CASE : List[Any] = distance_of_first_solution __SCREAMING_SNAKE_CASE : List[str] = solution while count <= iters: __SCREAMING_SNAKE_CASE : int = find_neighborhood(lowercase__ , lowercase__ ) __SCREAMING_SNAKE_CASE : Tuple = 0 __SCREAMING_SNAKE_CASE : Tuple = neighborhood[index_of_best_solution] __SCREAMING_SNAKE_CASE : Tuple = len(lowercase__ ) - 1 __SCREAMING_SNAKE_CASE : Dict = False while not found: __SCREAMING_SNAKE_CASE : Union[str, Any] = 0 while i < len(lowercase__ ): if best_solution[i] != solution[i]: __SCREAMING_SNAKE_CASE : str = best_solution[i] __SCREAMING_SNAKE_CASE : 
str = solution[i] break __SCREAMING_SNAKE_CASE : List[Any] = i + 1 if [first_exchange_node, second_exchange_node] not in tabu_list and [ second_exchange_node, first_exchange_node, ] not in tabu_list: tabu_list.append([first_exchange_node, second_exchange_node] ) __SCREAMING_SNAKE_CASE : Union[str, Any] = True __SCREAMING_SNAKE_CASE : str = best_solution[:-1] __SCREAMING_SNAKE_CASE : List[Any] = neighborhood[index_of_best_solution][best_cost_index] if cost < best_cost: __SCREAMING_SNAKE_CASE : str = cost __SCREAMING_SNAKE_CASE : str = solution else: __SCREAMING_SNAKE_CASE : Union[str, Any] = index_of_best_solution + 1 __SCREAMING_SNAKE_CASE : str = neighborhood[index_of_best_solution] if len(lowercase__ ) >= size: tabu_list.pop(0 ) __SCREAMING_SNAKE_CASE : Tuple = count + 1 return best_solution_ever, best_cost def _UpperCamelCase ( lowercase__=None ): __SCREAMING_SNAKE_CASE : Tuple = generate_neighbours(args.File ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Dict = generate_first_solution( args.File , lowercase__ ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : str = tabu_search( lowercase__ , lowercase__ , lowercase__ , args.Iterations , args.Size , ) print(F'''Best solution: {best_sol}, with total distance: {best_cost}.''' ) if __name__ == "__main__": __lowerCAmelCase : int =argparse.ArgumentParser(description='Tabu Search') parser.add_argument( '-f', '--File', type=str, help='Path to the file containing the data', required=True, ) parser.add_argument( '-i', '--Iterations', type=int, help='How many iterations the algorithm should perform', required=True, ) parser.add_argument( '-s', '--Size', type=int, help='Size of the tabu list', required=True ) # Pass the arguments to main method main(parser.parse_args())
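# The CLI above parses a whitespace-separated edge list ("node node distance"
# per line), and the start node is read as the first character of the file, so
# single-character node names are expected. A hedged sketch of a compatible
# input file and invocation; the file name, script name and values are made up:
#
#   tsp_data.txt:
#       a b 20
#       a c 18
#       b c 10
#       b d 22
#       c d 12
#
#   python tabu_search.py -f tsp_data.txt -i 100 -s 5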
9
'''simple docstring''' import torch from torch import nn from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin class __SCREAMING_SNAKE_CASE (lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" @register_to_config def __init__( self : Dict , *, __a : int = 4 , __a : int = 7_68 , __a : int , __a : int , ): super().__init__() _a = nn.Parameter(torch.zeros(__a ) ) # parameters for additional clip time embeddings _a = nn.Linear(__a , __a ) _a = nn.Linear(__a , __a ) # parameters for encoder hidden states _a = clip_extra_context_tokens _a = nn.Linear( __a , self.clip_extra_context_tokens * cross_attention_dim ) _a = nn.Linear(__a , __a ) _a = nn.LayerNorm(__a ) def UpperCamelCase__ ( self : Optional[Any] , *, __a : Tuple , __a : Union[str, Any] , __a : Any , __a : List[Any] ): if do_classifier_free_guidance: # Add the classifier free guidance embeddings to the image embeddings _a = image_embeddings.shape[0] _a = self.learned_classifier_free_guidance_embeddings.unsqueeze(0 ) _a = classifier_free_guidance_embeddings.expand( __a , -1 ) _a = torch.cat([classifier_free_guidance_embeddings, image_embeddings] , dim=0 ) # The image embeddings batch size and the text embeddings batch size are equal assert image_embeddings.shape[0] == prompt_embeds.shape[0] _a = prompt_embeds.shape[0] # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and # adding CLIP embeddings to the existing timestep embedding, ... _a = self.embedding_proj(__a ) _a = self.clip_image_embeddings_project_to_time_embeddings(__a ) _a = time_projected_image_embeddings + time_projected_prompt_embeds # ... and by projecting CLIP embeddings into four # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder" _a = self.clip_extra_context_tokens_proj(__a ) _a = clip_extra_context_tokens.reshape(__a , -1 , self.clip_extra_context_tokens ) _a = clip_extra_context_tokens.permute(0 , 2 , 1 ) _a = self.encoder_hidden_states_proj(__a ) _a = self.text_encoder_hidden_states_norm(__a ) _a = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states] , dim=1 ) return text_encoder_hidden_states, additive_clip_time_embeddings
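# The module above appears to be the text-projection block of the unCLIP
# pipeline. A hedged instantiation sketch; the import path assumes diffusers'
# UnCLIPTextProjModel (the class name is mangled in this record), and the
# embedding sizes are illustrative, not taken from this file:
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel  # path is an assumption

proj = UnCLIPTextProjModel(
    clip_extra_context_tokens=4,
    clip_embeddings_dim=768,
    time_embed_dim=1536,
    cross_attention_dim=2048,
)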
63
0
"""simple docstring""" def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ) ->float: """simple docstring""" lowerCAmelCase__ :Optional[int] = [redshift, radiation_density, matter_density, dark_energy] if any(p < 0 for p in parameters ): raise ValueError('All input parameters must be positive' ) if any(p > 1 for p in parameters[1:4] ): raise ValueError('Relative densities cannot be greater than one' ) else: lowerCAmelCase__ :Any = 1 - (matter_density + radiation_density + dark_energy) lowerCAmelCase__ :Any = ( radiation_density * (redshift + 1) ** 4 + matter_density * (redshift + 1) ** 3 + curvature * (redshift + 1) ** 2 + dark_energy ) lowerCAmelCase__ :str = hubble_constant * e_a ** (1 / 2) return hubble if __name__ == "__main__": import doctest # run doctest doctest.testmod() # demo LCDM approximation __A = 0.3 print( hubble_parameter( hubble_constant=68.3, radiation_density=1e-4, matter_density=matter_density, dark_energy=1 - matter_density, redshift=0, ) )
293
'''simple docstring''' import logging from pathlib import Path import numpy as np import pytorch_lightning as pl import torch from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint from pytorch_lightning.utilities import rank_zero_only from utils_rag import save_json def _lowerCamelCase ( lowercase : Dict ) -> Any: _a = filter(lambda lowercase : p.requires_grad , model.parameters() ) _a = sum([np.prod(p.size() ) for p in model_parameters] ) return params lowerCAmelCase_ : int = logging.getLogger(__name__) def _lowerCamelCase ( lowercase : List[Any] , lowercase : Any ) -> Any: if metric == "rouge2": _a = "{val_avg_rouge2:.4f}-{step_count}" elif metric == "bleu": _a = "{val_avg_bleu:.4f}-{step_count}" elif metric == "em": _a = "{val_avg_em:.4f}-{step_count}" else: raise NotImplementedError( F'seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this' " function." ) _a = ModelCheckpoint( dirpath=lowercase , filename=lowercase , monitor=F'val_{metric}' , mode="max" , save_top_k=3 , every_n_epochs=1 , ) return checkpoint_callback def _lowerCamelCase ( lowercase : Optional[int] , lowercase : Optional[int] ) -> Union[str, Any]: return EarlyStopping( monitor=F'val_{metric}' , mode="min" if "loss" in metric else "max" , patience=lowercase , verbose=lowercase , ) class __SCREAMING_SNAKE_CASE (pl.Callback ): """simple docstring""" def UpperCamelCase__ ( self : Optional[int] , __a : str , __a : List[Any] ): _a = {f'lr_group_{i}': param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )} pl_module.logger.log_metrics(__a ) @rank_zero_only def UpperCamelCase__ ( self : Optional[int] , __a : pl.Trainer , __a : pl.LightningModule , __a : str , __a : Tuple=True ): logger.info(f'***** {type_path} results at step {trainer.global_step:05d} *****' ) _a = trainer.callback_metrics trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]} ) # Log results _a = Path(pl_module.hparams.output_dir ) if type_path == "test": _a = od / "test_results.txt" _a = od / "test_generations.txt" else: # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json # If people want this it will be easy enough to add back. 
_a = od / f'{type_path}_results/{trainer.global_step:05d}.txt' _a = od / f'{type_path}_generations/{trainer.global_step:05d}.txt' results_file.parent.mkdir(exist_ok=__a ) generations_file.parent.mkdir(exist_ok=__a ) with open(__a , "a+" ) as writer: for key in sorted(__a ): if key in ["log", "progress_bar", "preds"]: continue _a = metrics[key] if isinstance(__a , torch.Tensor ): _a = val.item() _a = f'{key}: {val:.6f}\n' writer.write(__a ) if not save_generations: return if "preds" in metrics: _a = "\n".join(metrics["preds"] ) generations_file.open("w+" ).write(__a ) @rank_zero_only def UpperCamelCase__ ( self : int , __a : List[Any] , __a : Union[str, Any] ): try: _a = pl_module.model.model.num_parameters() except AttributeError: _a = pl_module.model.num_parameters() _a = count_trainable_parameters(__a ) # mp stands for million parameters trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6} ) @rank_zero_only def UpperCamelCase__ ( self : Union[str, Any] , __a : pl.Trainer , __a : pl.LightningModule ): save_json(pl_module.metrics , pl_module.metrics_save_path ) return self._write_logs(__a , __a , "test" ) @rank_zero_only def UpperCamelCase__ ( self : Any , __a : pl.Trainer , __a : int ): save_json(pl_module.metrics , pl_module.metrics_save_path ) # Uncommenting this will save val generations # return self._write_logs(trainer, pl_module, "valid")
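# A hedged sketch of wiring these callbacks into a pytorch_lightning.Trainer,
# assuming the helpers keep their original names from the transformers RAG
# examples (Seq2SeqLoggingCallback, get_checkpoint_callback,
# get_early_stopping_callback; all three are mangled above). The metric name
# and output directory are placeholders:
import pytorch_lightning as pl

checkpoint_cb = get_checkpoint_callback("outputs", "rouge2")
early_stop_cb = get_early_stopping_callback(metric="rouge2", patience=3)
trainer = pl.Trainer(callbacks=[Seq2SeqLoggingCallback(), checkpoint_cb, early_stop_cb])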
63
0
from abc import ABC, abstractmethod from typing import Optional, Union from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit from ..utils.typing import NestedDataStructureLike, PathLike class UpperCAmelCase__ ( lowerCamelCase_ ): """simple docstring""" def __init__( self : int , __lowerCamelCase : Optional[NestedDataStructureLike[PathLike]] = None , __lowerCamelCase : Optional[NamedSplit] = None , __lowerCamelCase : Optional[Features] = None , __lowerCamelCase : str = None , __lowerCamelCase : bool = False , __lowerCamelCase : bool = False , __lowerCamelCase : Optional[int] = None , **__lowerCamelCase : int , ) -> Union[str, Any]: SCREAMING_SNAKE_CASE__ = path_or_paths SCREAMING_SNAKE_CASE__ = split if split or isinstance(__a , __a ) else '''train''' SCREAMING_SNAKE_CASE__ = features SCREAMING_SNAKE_CASE__ = cache_dir SCREAMING_SNAKE_CASE__ = keep_in_memory SCREAMING_SNAKE_CASE__ = streaming SCREAMING_SNAKE_CASE__ = num_proc SCREAMING_SNAKE_CASE__ = kwargs @abstractmethod def lowercase_ ( self : Union[str, Any] ) -> Optional[Any]: pass class UpperCAmelCase__ ( lowerCamelCase_ ): """simple docstring""" def __init__( self : List[Any] , __lowerCamelCase : Optional[Features] = None , __lowerCamelCase : str = None , __lowerCamelCase : bool = False , __lowerCamelCase : bool = False , __lowerCamelCase : Optional[int] = None , **__lowerCamelCase : List[Any] , ) -> Optional[Any]: SCREAMING_SNAKE_CASE__ = features SCREAMING_SNAKE_CASE__ = cache_dir SCREAMING_SNAKE_CASE__ = keep_in_memory SCREAMING_SNAKE_CASE__ = streaming SCREAMING_SNAKE_CASE__ = num_proc SCREAMING_SNAKE_CASE__ = kwargs @abstractmethod def lowercase_ ( self : List[Any] ) -> Optional[int]: pass
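# A minimal concrete reader sketched against the abstract base above (whose
# name is mangled; in `datasets` it is AbstractDatasetReader and the abstract
# method is `read`). The subclass name and body are illustrative only:
from datasets import Dataset

class InMemoryReader(AbstractDatasetReader):  # hypothetical subclass for illustration
    def __init__(self, rows, **kwargs):
        super().__init__(path_or_paths=None, **kwargs)
        self.rows = rows

    def read(self):
        # Real readers dispatch to format-specific builders; this one just wraps a list.
        return Dataset.from_list(self.rows)

# ds = InMemoryReader([{"text": "hello"}, {"text": "world"}]).read()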
314
import math


class SelfOrganizingMap:
    """Two-cluster self-organizing map trained by competitive learning."""

    def get_winner(self, weights: list[list[float]], sample: list[int]) -> int:
        """Return the index (0 or 1) of the winning weight vector for a sample."""
        d0 = 0.0
        d1 = 0.0
        for i in range(len(sample)):
            d0 += math.pow(sample[i] - weights[0][i], 2)
            d1 += math.pow(sample[i] - weights[1][i], 2)
        # The winner is the closer vector (smaller squared Euclidean distance).
        return 0 if d0 < d1 else 1

    def update(
        self, weights: list[list[int | float]], sample: list[int], j: int, alpha: float
    ) -> list[list[int | float]]:
        """Pull every component of the winning vector `j` towards the sample."""
        for i in range(len(weights[j])):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights


def main() -> None:
    # Training Examples ( m, n )
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]

    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]

    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5

    for _ in range(epochs):
        for j in range(len(training_samples)):
            # training sample
            sample = training_samples[j]

            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights, sample)

            # Update the winning vector
            weights = self_organizing_map.update(weights, sample, winner, alpha)

    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights, sample)

    # results
    print(f"Clusters that the test sample belongs to : {winner}")
    print(f"Weights that have been trained : {weights}")


# running the main() function
if __name__ == "__main__":
    main()
63
0
from functools import reduce

N = (
    "73167176531330624919225119674426574742355349194934"
    "96983520312774506326239578318016984801869478851843"
    "85861560789112949495459501737958331952853208805511"
    "12540698747158523863050715693290963295227443043557"
    "66896648950445244523161731856403098711121722383113"
    "62229893423380308135336276614282806444486645238749"
    "30358907296290491560440772390713810515859307960866"
    "70172427121883998797908792274921901699720888093776"
    "65727333001053367881220235421809751254540594752243"
    "52584907711670556013604839586446706324415722155397"
    "53697817977846174064955149290862569321978468622482"
    "83972241375657056057490261407972968652414535100474"
    "82166370484403199890008895243450658541227588666881"
    "16427171479924442928230863465674813919123162824586"
    "17866458359124566529476545682848912883142607690042"
    "24219022671055626321111109370544217506941658960408"
    "07198403850962455444362981230987879927244284909188"
    "84580156166097919133875499200524063689912560717606"
    "05886116467109405077541002256983155200055935729725"
    "71636269561882670428252483600823257530420752963450"
)


def solution(n: str = N) -> int:
    """Return the largest product of thirteen adjacent digits in `n`."""
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda acc, digit: str(int(acc) * int(digit)), n[i : i + 13]))
        for i in range(len(n) - 12)
    )


if __name__ == "__main__":
    print(f"{solution() = }")
227
'''simple docstring''' import warnings from typing import List import numpy as np from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding from ...utils import is_flax_available, is_tf_available, is_torch_available class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ): """simple docstring""" __a =['image_processor', 'tokenizer'] __a ='OwlViTImageProcessor' __a =('CLIPTokenizer', 'CLIPTokenizerFast') def __init__( self : List[Any] , __a : str=None , __a : List[str]=None , **__a : List[Any] ): _a = None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead." , __a , ) _a = kwargs.pop("feature_extractor" ) _a = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`." ) if tokenizer is None: raise ValueError("You need to specify a `tokenizer`." ) super().__init__(__a , __a ) def __call__( self : Union[str, Any] , __a : Any=None , __a : List[str]=None , __a : int=None , __a : Optional[int]="max_length" , __a : List[str]="np" , **__a : Any ): if text is None and query_images is None and images is None: raise ValueError( "You have to specify at least one text or query image or image. All three cannot be none." ) if text is not None: if isinstance(__a , __a ) or (isinstance(__a , __a ) and not isinstance(text[0] , __a )): _a = [self.tokenizer(__a , padding=__a , return_tensors=__a , **__a )] elif isinstance(__a , __a ) and isinstance(text[0] , __a ): _a = [] # Maximum number of queries across batch _a = max([len(__a ) for t in text] ) # Pad all batch samples to max number of text queries for t in text: if len(__a ) != max_num_queries: _a = t + [" "] * (max_num_queries - len(__a )) _a = self.tokenizer(__a , padding=__a , return_tensors=__a , **__a ) encodings.append(__a ) else: raise TypeError("Input text should be a string, a list of strings or a nested list of strings" ) if return_tensors == "np": _a = np.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 ) _a = np.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 ) elif return_tensors == "jax" and is_flax_available(): import jax.numpy as jnp _a = jnp.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 ) _a = jnp.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 ) elif return_tensors == "pt" and is_torch_available(): import torch _a = torch.cat([encoding["input_ids"] for encoding in encodings] , dim=0 ) _a = torch.cat([encoding["attention_mask"] for encoding in encodings] , dim=0 ) elif return_tensors == "tf" and is_tf_available(): import tensorflow as tf _a = tf.stack([encoding["input_ids"] for encoding in encodings] , axis=0 ) _a = tf.stack([encoding["attention_mask"] for encoding in encodings] , axis=0 ) else: raise ValueError("Target return tensor type could not be returned" ) _a = BatchEncoding() _a = input_ids _a = attention_mask if query_images is not None: _a = BatchEncoding() _a = self.image_processor( __a , return_tensors=__a , **__a ).pixel_values _a = query_pixel_values if images is not None: _a = self.image_processor(__a , return_tensors=__a , **__a ) if text is not None and images is not None: _a = image_features.pixel_values return encoding elif query_images is not None and images is not None: _a = image_features.pixel_values return encoding elif text is not None or query_images is not None: return 
encoding else: return BatchEncoding(data=dict(**__a ) , tensor_type=__a ) def UpperCamelCase__ ( self : List[str] , *__a : Union[str, Any] , **__a : int ): return self.image_processor.post_process(*__a , **__a ) def UpperCamelCase__ ( self : Optional[int] , *__a : Optional[Any] , **__a : List[str] ): return self.image_processor.post_process_object_detection(*__a , **__a ) def UpperCamelCase__ ( self : Optional[Any] , *__a : Dict , **__a : Union[str, Any] ): return self.image_processor.post_process_image_guided_detection(*__a , **__a ) def UpperCamelCase__ ( self : str , *__a : Tuple , **__a : Tuple ): return self.tokenizer.batch_decode(*__a , **__a ) def UpperCamelCase__ ( self : List[str] , *__a : List[Any] , **__a : Optional[int] ): return self.tokenizer.decode(*__a , **__a ) @property def UpperCamelCase__ ( self : List[str] ): warnings.warn( "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , __a , ) return self.image_processor_class @property def UpperCamelCase__ ( self : str ): warnings.warn( "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , __a , ) return self.image_processor
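# A usage sketch, assuming the class above is transformers' OwlViTProcessor
# (its name is mangled in this record); the checkpoint and image URL are the
# ones used in the library's own examples:
import requests
from PIL import Image
from transformers import OwlViTProcessor

processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

# One image, two text queries -> input_ids, attention_mask and pixel_values.
inputs = processor(text=[["a photo of a cat", "a photo of a dog"]], images=image, return_tensors="pt")
print(inputs.keys())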
63
0
'''simple docstring''' import re from ..models.auto import AutoProcessor from ..models.vision_encoder_decoder import VisionEncoderDecoderModel from ..utils import is_vision_available from .base import PipelineTool if is_vision_available(): from PIL import Image class A__ ( lowerCamelCase_ ): lowercase = "naver-clova-ix/donut-base-finetuned-docvqa" lowercase = ( "This is a tool that answers a question about an document (pdf). It takes an input named `document` which " "should be the document containing the information, as well as a `question` that is the question about the " "document. It returns a text that contains the answer to the question." ) lowercase = "document_qa" lowercase = AutoProcessor lowercase = VisionEncoderDecoderModel lowercase = ["image", "text"] lowercase = ["text"] def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> List[Any]: '''simple docstring''' if not is_vision_available(): raise ValueError("""Pillow must be installed to use the DocumentQuestionAnsweringTool.""" ) super().__init__(*__a , **__a ) def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Union[str, Any]: '''simple docstring''' A_ = """<s_docvqa><s_question>{user_input}</s_question><s_answer>""" A_ = task_prompt.replace("""{user_input}""" , __a ) A_ = self.pre_processor.tokenizer( __a , add_special_tokens=__a , return_tensors="""pt""" ).input_ids A_ = self.pre_processor(__a , return_tensors="""pt""" ).pixel_values return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values} def snake_case_ ( self , UpperCamelCase__ ) -> int: '''simple docstring''' return self.model.generate( inputs["""pixel_values"""].to(self.device ) , decoder_input_ids=inputs["""decoder_input_ids"""].to(self.device ) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=__a , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=__a , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=__a , ).sequences def snake_case_ ( self , UpperCamelCase__ ) -> Optional[int]: '''simple docstring''' A_ = self.pre_processor.batch_decode(__a )[0] A_ = sequence.replace(self.pre_processor.tokenizer.eos_token , """""" ) A_ = sequence.replace(self.pre_processor.tokenizer.pad_token , """""" ) A_ = re.sub(R"""<.*?>""" , """""" , __a , count=1 ).strip() # remove first task start token A_ = self.pre_processor.tokenajson(__a ) return sequence["answer"]
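# A hedged usage sketch for the tool above. Loading by task id through
# load_tool follows the transformers agents docs at the time; the task id and
# the file name are assumptions, not taken from this record:
from PIL import Image
from transformers import load_tool

doc_qa = load_tool("document-question-answering")  # task id is an assumption
# answer = doc_qa(document=Image.open("invoice.png"), question="What is the total amount?")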
162
def harmonic_series(n_term: str) -> list:
    """Return the harmonic series 1, 1/2, 1/3, ..., 1/n as a list of strings."""
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series


if __name__ == "__main__":
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
    print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
    print(harmonic_series(nth_term))
63
0
"""simple docstring""" import math def UpperCAmelCase__ (snake_case__ : int = 1_00 ): """simple docstring""" _snake_case : str = sum(i * i for i in range(1 , n + 1 ) ) _snake_case : Tuple = int(math.pow(sum(range(1 , n + 1 ) ) , 2 ) ) return square_of_sum - sum_of_squares if __name__ == "__main__": print(F'''{solution() = }''')
64
"""simple docstring""" import argparse import json import os import fairseq import torch from torch import nn from transformers import ( SpeechaTextaConfig, SpeechaTextaForCausalLM, SpeechaTextaTokenizer, SpeechEncoderDecoderConfig, SpeechEncoderDecoderModel, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaModel, logging, ) logging.set_verbosity_info() A_ = logging.get_logger(__name__) A_ = { '''post_extract_proj''': '''feature_projection.projection''', '''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''', '''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''', '''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''', '''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''', '''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''', '''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''', '''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''', '''fc2''': '''encoder.layers.*.feed_forward.output_dense''', '''final_layer_norm''': '''encoder.layers.*.final_layer_norm''', '''encoder.layer_norm''': '''encoder.layer_norm''', '''w2v_model.layer_norm''': '''feature_projection.layer_norm''', '''quantizer.weight_proj''': '''quantizer.weight_proj''', '''quantizer.vars''': '''quantizer.codevectors''', '''project_q''': '''project_q''', '''final_proj''': '''project_hid''', '''w2v_encoder.proj''': '''lm_head''', '''mask_emb''': '''masked_spec_embed''', } A_ = [ '''lm_head''', '''quantizer.weight_proj''', '''quantizer.codevectors''', '''project_q''', '''project_hid''', ] def UpperCAmelCase__ (snake_case__ : str , snake_case__ : Dict , snake_case__ : Any , snake_case__ : str , snake_case__ : str ): """simple docstring""" for attribute in key.split(""".""" ): _snake_case : Optional[Any] = getattr(snake_case__ , snake_case__ ) if weight_type is not None: _snake_case : Optional[Any] = getattr(snake_case__ , snake_case__ ).shape else: _snake_case : Optional[Any] = hf_pointer.shape assert hf_shape == value.shape, ( F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be" F" {value.shape} for {full_name}" ) if weight_type == "weight": _snake_case : int = value elif weight_type == "weight_g": _snake_case : str = value elif weight_type == "weight_v": _snake_case : Tuple = value elif weight_type == "bias": _snake_case : List[str] = value else: _snake_case : int = value logger.info(F"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." 
) def UpperCAmelCase__ (snake_case__ : str , snake_case__ : List[str] ): """simple docstring""" _snake_case : List[Any] = [] _snake_case : Optional[Any] = fairseq_model.state_dict() _snake_case : str = hf_model.feature_extractor # if encoder has different dim to decoder -> use proj_weight _snake_case : Optional[Any] = None for name, value in fairseq_dict.items(): _snake_case : Optional[Any] = False if "conv_layers" in name: load_conv_layer( snake_case__ , snake_case__ , snake_case__ , snake_case__ , hf_model.config.feat_extract_norm == """group""" , ) _snake_case : Dict = True elif name.split(""".""" )[0] == "proj": _snake_case : Dict = fairseq_model.proj _snake_case : Optional[int] = True else: for key, mapped_key in MAPPING.items(): if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]: _snake_case : Dict = True if "*" in mapped_key: _snake_case : Optional[int] = name.split(snake_case__ )[0].split(""".""" )[-2] _snake_case : Union[str, Any] = mapped_key.replace("""*""" , snake_case__ ) if "weight_g" in name: _snake_case : str = """weight_g""" elif "weight_v" in name: _snake_case : Optional[Any] = """weight_v""" elif "bias" in name: _snake_case : Union[str, Any] = """bias""" elif "weight" in name: _snake_case : int = """weight""" else: _snake_case : Optional[int] = None set_recursively(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) continue if not is_used: unused_weights.append(snake_case__ ) logger.warning(F"Unused weights: {unused_weights}" ) return proj_weight def UpperCAmelCase__ (snake_case__ : Any , snake_case__ : Dict , snake_case__ : Union[str, Any] , snake_case__ : Union[str, Any] , snake_case__ : int ): """simple docstring""" _snake_case : Any = full_name.split("""conv_layers.""" )[-1] _snake_case : Optional[int] = name.split(""".""" ) _snake_case : List[str] = int(items[0] ) _snake_case : Dict = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F"{full_name} has size {value.shape}, but" F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." ) _snake_case : Tuple = value logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F"{full_name} has size {value.shape}, but" F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." ) _snake_case : List[Any] = value logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was" " found." ) _snake_case : int = value logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F"{full_name} has size {value.shape}, but" F" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found." ) _snake_case : List[str] = value logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." 
) else: unused_weights.append(snake_case__ ) def UpperCAmelCase__ (snake_case__ : Union[str, Any] ): """simple docstring""" _snake_case , _snake_case : Optional[Any] = emb.weight.shape _snake_case : Optional[int] = nn.Linear(snake_case__ , snake_case__ , bias=snake_case__ ) _snake_case : Union[str, Any] = emb.weight.data return lin_layer def UpperCAmelCase__ (snake_case__ : List[Any] ): """simple docstring""" with open(snake_case__ , """r""" , encoding="""utf-8""" ) as f: _snake_case : Any = f.readlines() _snake_case : Optional[Any] = [line.split(""" """ )[0] for line in lines] _snake_case : str = len(snake_case__ ) _snake_case : Tuple = { """<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3, } vocab_dict.update(dict(zip(snake_case__ , range(4 , num_words + 4 ) ) ) ) return vocab_dict @torch.no_grad() def UpperCAmelCase__ (snake_case__ : int , snake_case__ : List[str] , snake_case__ : int , snake_case__ : Dict , snake_case__ : List[Any] , snake_case__ : str , snake_case__ : Union[str, Any] , ): """simple docstring""" _snake_case : Optional[int] = WavaVecaConfig.from_pretrained(snake_case__ ) _snake_case : List[str] = SpeechaTextaConfig.from_pretrained( snake_case__ , vocab_size=snake_case__ , decoder_layers=snake_case__ , do_stable_layer_norm=snake_case__ ) _snake_case : Dict = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=snake_case__ , return_attention_mask=snake_case__ , ) _snake_case , _snake_case , _snake_case : Optional[int] = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} ) _snake_case : Optional[Any] = model[0].eval() # set weights for wav2vec2 encoder _snake_case : Any = WavaVecaModel(snake_case__ ) _snake_case : Optional[Any] = recursively_load_weights_wavaveca(model.encoder , snake_case__ ) _snake_case : Optional[Any] = SpeechaTextaForCausalLM(snake_case__ ) _snake_case , _snake_case : List[str] = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=snake_case__ ) # set output linear layer unexpected_keys.remove("""embed_out""" ) _snake_case : Any = nn.Parameter(model.decoder.embed_out.detach() ) # layer norm is init to identity matrix so leaving it is fine logger.warning(F"The following keys are missing when loading the decoder weights: {missing_keys}" ) logger.warning(F"The following keys are unexpected when loading the decoder weights: {unexpected_keys}" ) _snake_case : Any = SpeechEncoderDecoderModel(encoder=snake_case__ , decoder=snake_case__ ) _snake_case : Any = False # add projection layer _snake_case : int = nn.Parameter(projection_layer.weight ) _snake_case : Any = nn.Parameter(projection_layer.bias ) _snake_case : Any = create_vocab_dict(snake_case__ ) with open(os.path.join(snake_case__ , """vocab.json""" ) , """w""" ) as fp: json.dump(snake_case__ , snake_case__ ) _snake_case : Dict = SpeechaTextaTokenizer(os.path.join(snake_case__ , """vocab.json""" ) ) tokenizer.save_pretrained(snake_case__ ) _snake_case : str = hf_wavavec.config.to_dict() _snake_case : List[str] = tokenizer.pad_token_id _snake_case : Union[str, Any] = tokenizer.bos_token_id _snake_case : Union[str, Any] = tokenizer.eos_token_id _snake_case : Optional[Any] = """speech_to_text_2""" _snake_case : Optional[int] = """wav2vec2""" _snake_case : Tuple = SpeechEncoderDecoderConfig.from_dict(snake_case__ ) hf_wavavec.save_pretrained(snake_case__ ) feature_extractor.save_pretrained(snake_case__ ) if __name__ == "__main__": A_ 
= argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''') parser.add_argument( '''--encoder_config_path''', default='''facebook/wav2vec2-large-lv60''', type=str, help='''Path to hf encoder wav2vec2 checkpoint config''', ) parser.add_argument( '''--decoder_config_path''', default='''facebook/s2t-small-mustc-en-fr-st''', type=str, help='''Path to hf decoder s2t checkpoint config''', ) parser.add_argument('''--vocab_size''', default=1_02_24, type=int, help='''Vocab size of decoder''') parser.add_argument('''--num_decoder_layers''', default=7, type=int, help='''Number of decoder layers''') A_ = parser.parse_args() convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.dict_path, encoder_config_path=args.encoder_config_path, decoder_config_path=args.decoder_config_path, vocab_size=args.vocab_size, num_decoder_layers=args.num_decoder_layers, )
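# A hedged example of invoking the conversion CLI defined above; the script
# name and all paths are placeholders:
#
#   python convert_wav2vec2_seq2seq_original.py \
#       --checkpoint_path checkpoint_best.pt \
#       --dict_path dict.ltr.txt \
#       --pytorch_dump_folder_path ./wav2vec2-2-speech2text2 \
#       --vocab_size 10224 \
#       --num_decoder_layers 7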
64
1
"""simple docstring""" # Usage: # ./gen-card-allenai-wmt16.py import os from pathlib import Path def UpperCAmelCase__ (snake_case__ : Optional[int] , snake_case__ : str , snake_case__ : Optional[Any] , snake_case__ : str ): """simple docstring""" _snake_case : Tuple = { """en""": """Machine learning is great, isn't it?""", """ru""": """Машинное обучение - это здорово, не так ли?""", """de""": """Maschinelles Lernen ist großartig, nicht wahr?""", } # BLUE scores as follows: # "pair": [fairseq, transformers] _snake_case : int = { """wmt16-en-de-dist-12-1""": [28.3, 27.52], """wmt16-en-de-dist-6-1""": [27.4, 27.11], """wmt16-en-de-12-1""": [26.9, 25.75], } _snake_case : Union[str, Any] = F"{src_lang}-{tgt_lang}" _snake_case : Dict = F"\n---\nlanguage:\n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt16\n- allenai\nlicense: apache-2.0\ndatasets:\n- wmt16\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.\n\nFor more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).\n\nAll 3 models are available:\n\n* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)\n* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)\n* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)\n\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"allenai/{model_name}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n\n## Training data\n\nPretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).\n\n## Eval results\n\nHere are the BLEU scores:\n\nmodel | fairseq | transformers\n-------|---------|----------\n{model_name} | {scores[model_name][0]} | {scores[model_name][1]}\n\nThe score is slightly below the score reported in the paper, as the researchers don't use `sacrebleu` and measure the score on tokenized outputs. 
`transformers` score was measured using `sacrebleu` on detokenized outputs.\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=5\nmkdir -p $DATA_DIR\nsacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt16/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)\n\n\n### BibTeX entry and citation info\n\n```\n@misc{{kasai2020deep,\n title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},\n author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},\n year={{2020}},\n eprint={{2006.10369}},\n archivePrefix={{arXiv}},\n primaryClass={{cs.CL}}\n}}\n```\n\n" model_card_dir.mkdir(parents=snake_case__ , exist_ok=snake_case__ ) _snake_case : Any = os.path.join(snake_case__ , """README.md""" ) print(F"Generating {path}" ) with open(snake_case__ , """w""" , encoding="""utf-8""" ) as f: f.write(snake_case__ ) # make sure we are under the root of the project A_ = Path(__file__).resolve().parent.parent.parent A_ = repo_dir / '''model_cards''' for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]: A_ = model_cards_dir / '''allenai''' / model_name write_model_card(model_card_dir, src_lang='''en''', tgt_lang='''de''', model_name=model_name)
64
"""simple docstring""" import argparse import os # New Code # import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils import find_executable_batch_size ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to ensure out-of-memory errors never # interrupt training, and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## A_ = 16 A_ = 32 def UpperCAmelCase__ (snake_case__ : Accelerator , snake_case__ : int = 16 ): """simple docstring""" _snake_case : Optional[Any] = AutoTokenizer.from_pretrained("""bert-base-cased""" ) _snake_case : Any = load_dataset("""glue""" , """mrpc""" ) def tokenize_function(snake_case__ : Any ): # max_length=None => use the model max length (it's actually the default) _snake_case : Any = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=snake_case__ , max_length=snake_case__ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): _snake_case : List[Any] = datasets.map( snake_case__ , batched=snake_case__ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library _snake_case : int = tokenized_datasets.rename_column("""label""" , """labels""" ) def collate_fn(snake_case__ : int ): # On TPU it's best to pad everything to the same length or training will be very slow. _snake_case : Optional[int] = 1_28 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": _snake_case : str = 16 elif accelerator.mixed_precision != "no": _snake_case : Optional[int] = 8 else: _snake_case : Optional[int] = None return tokenizer.pad( snake_case__ , padding="""longest""" , max_length=snake_case__ , pad_to_multiple_of=snake_case__ , return_tensors="""pt""" , ) # Instantiate dataloaders. 
_snake_case : Optional[int] = DataLoader( tokenized_datasets["""train"""] , shuffle=snake_case__ , collate_fn=snake_case__ , batch_size=snake_case__ ) _snake_case : Dict = DataLoader( tokenized_datasets["""validation"""] , shuffle=snake_case__ , collate_fn=snake_case__ , batch_size=snake_case__ ) return train_dataloader, eval_dataloader # For testing only if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1": from accelerate.test_utils.training import mocked_dataloaders A_ = mocked_dataloaders # noqa: F811 def UpperCAmelCase__ (snake_case__ : List[Any] , snake_case__ : Any ): """simple docstring""" if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , snake_case__ ) == "1": _snake_case : List[Any] = 2 # Initialize accelerator _snake_case : str = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs _snake_case : Tuple = config["""lr"""] _snake_case : str = int(config["""num_epochs"""] ) _snake_case : Union[str, Any] = int(config["""seed"""] ) _snake_case : Union[str, Any] = int(config["""batch_size"""] ) _snake_case : List[str] = evaluate.load("""glue""" , """mrpc""" ) # New Code # # We now can define an inner training loop function. It should take a batch size as the only parameter, # and build the dataloaders in there. # It also gets our decorator @find_executable_batch_size(starting_batch_size=snake_case__ ) def inner_training_loop(snake_case__ : Union[str, Any] ): # And now just move everything below under this function # We need to bring in the Accelerator object from earlier nonlocal accelerator # And reset all of its attributes that could hold onto any memory: accelerator.free_memory() # Then we can declare the model, optimizer, and everything else: set_seed(snake_case__ ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) _snake_case : List[Any] = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=snake_case__ ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). _snake_case : Tuple = model.to(accelerator.device ) # Instantiate optimizer _snake_case : str = AdamW(params=model.parameters() , lr=snake_case__ ) _snake_case , _snake_case : Optional[int] = get_dataloaders(snake_case__ , snake_case__ ) # Instantiate scheduler _snake_case : str = get_linear_schedule_with_warmup( optimizer=snake_case__ , num_warmup_steps=1_00 , num_training_steps=(len(snake_case__ ) * num_epochs) , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. _snake_case , _snake_case , _snake_case , _snake_case , _snake_case : List[str] = accelerator.prepare( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) # Now we train the model for epoch in range(snake_case__ ): model.train() for step, batch in enumerate(snake_case__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) _snake_case : int = model(**snake_case__ ) _snake_case : str = outputs.loss accelerator.backward(snake_case__ ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(snake_case__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): _snake_case : int = model(**snake_case__ ) _snake_case : Optional[Any] = outputs.logits.argmax(dim=-1 ) _snake_case , _snake_case : Tuple = accelerator.gather_for_metrics((predictions, batch["""labels"""]) ) metric.add_batch( predictions=snake_case__ , references=snake_case__ , ) _snake_case : str = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F"epoch {epoch}:" , snake_case__ ) # New Code # # And call it at the end with no arguments # Note: You could also refactor this outside of your training loop function inner_training_loop() def UpperCAmelCase__ (): """simple docstring""" _snake_case : Any = argparse.ArgumentParser(description="""Simple example of training script.""" ) parser.add_argument( """--mixed_precision""" , type=snake_case__ , default=snake_case__ , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose""" """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.""" """and an Nvidia Ampere GPU.""" , ) parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" ) _snake_case : Dict = parser.parse_args() _snake_case : int = {"""lr""": 2e-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16} training_function(snake_case__ , snake_case__ ) if __name__ == "__main__": main()
64
1
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging A_ = logging.get_logger(__name__) A_ = { '''RWKV/rwkv-4-169m-pile''': '''https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json''', '''RWKV/rwkv-4-430m-pile''': '''https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json''', '''RWKV/rwkv-4-1b5-pile''': '''https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json''', '''RWKV/rwkv-4-3b-pile''': '''https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json''', '''RWKV/rwkv-4-7b-pile''': '''https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json''', '''RWKV/rwkv-4-14b-pile''': '''https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json''', '''RWKV/rwkv-raven-1b5''': '''https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json''', '''RWKV/rwkv-raven-3b''': '''https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json''', '''RWKV/rwkv-raven-7b''': '''https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json''', '''RWKV/rwkv-raven-14b''': '''https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json''', } class lowercase( __a ): '''simple docstring''' lowercase__ = "rwkv" lowercase__ = {"max_position_embeddings": "context_length"} def __init__( self: Any, a_: Optional[Any]=50_277, a_: List[Any]=1_024, a_: List[Any]=4_096, a_: Any=32, a_: List[str]=None, a_: Optional[int]=None, a_: List[Any]=1E-5, a_: Union[str, Any]=0, a_: Dict=0, a_: Dict=6, a_: str=False, a_: List[str]=True, **a_: Dict, ): '''simple docstring''' _snake_case : Optional[int] = vocab_size _snake_case : Tuple = context_length _snake_case : List[Any] = hidden_size _snake_case : int = num_hidden_layers _snake_case : Optional[Any] = attention_hidden_size if attention_hidden_size is not None else hidden_size _snake_case : Union[str, Any] = intermediate_size if intermediate_size is not None else 4 * hidden_size _snake_case : Optional[int] = layer_norm_epsilon _snake_case : List[str] = rescale_every _snake_case : Union[str, Any] = use_cache _snake_case : Optional[Any] = bos_token_id _snake_case : Dict = eos_token_id super().__init__( tie_word_embeddings=a_, bos_token_id=a_, eos_token_id=a_, **a_ )
64
"""simple docstring""" import os import zipfile import requests from get_ci_error_statistics import download_artifact, get_artifacts_links def UpperCAmelCase__ (snake_case__ : Optional[int] , snake_case__ : Any=7 ): """simple docstring""" _snake_case : Any = None if token is not None: _snake_case : Any = {"""Accept""": """application/vnd.github+json""", """Authorization""": F"Bearer {token}"} # The id of a workflow (not of a workflow run) _snake_case : List[str] = """636036""" _snake_case : Union[str, Any] = F"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs" # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results url += F"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}" _snake_case : str = requests.get(snake_case__ , headers=snake_case__ ).json() return result["workflow_runs"] def UpperCAmelCase__ (snake_case__ : Optional[Any] ): """simple docstring""" _snake_case : str = get_daily_ci_runs(snake_case__ ) _snake_case : str = None for workflow_run in workflow_runs: if workflow_run["status"] == "completed": _snake_case : List[str] = workflow_run["""id"""] break return workflow_run_id def UpperCAmelCase__ (snake_case__ : str , snake_case__ : Union[str, Any] , snake_case__ : Optional[int] ): """simple docstring""" _snake_case : Optional[Any] = get_last_daily_ci_runs(snake_case__ ) if workflow_run_id is not None: _snake_case : Optional[Any] = get_artifacts_links(worflow_run_id=snake_case__ , token=snake_case__ ) for artifact_name in artifact_names: if artifact_name in artifacts_links: _snake_case : Optional[int] = artifacts_links[artifact_name] download_artifact( artifact_name=snake_case__ , artifact_url=snake_case__ , output_dir=snake_case__ , token=snake_case__ ) def UpperCAmelCase__ (snake_case__ : Union[str, Any] , snake_case__ : List[str] , snake_case__ : int ): """simple docstring""" get_last_daily_ci_artifacts(snake_case__ , snake_case__ , snake_case__ ) _snake_case : int = {} for artifact_name in artifact_names: _snake_case : int = os.path.join(snake_case__ , F"{artifact_name}.zip" ) if os.path.isfile(snake_case__ ): _snake_case : Tuple = {} with zipfile.ZipFile(snake_case__ ) as z: for filename in z.namelist(): if not os.path.isdir(snake_case__ ): # read the file with z.open(snake_case__ ) as f: _snake_case : Any = f.read().decode("""UTF-8""" ) return results
64
1
"""simple docstring""" import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING A_ = logging.get_logger(__name__) A_ = { '''ut/deta''': '''https://huggingface.co/ut/deta/resolve/main/config.json''', } class lowercase( __a ): '''simple docstring''' lowercase__ = "deta" lowercase__ = { "hidden_size": "d_model", "num_attention_heads": "encoder_attention_heads", } def __init__( self: Any, a_: Tuple=None, a_: List[str]=900, a_: List[str]=2_048, a_: List[Any]=6, a_: Optional[Any]=2_048, a_: int=8, a_: int=6, a_: List[Any]=1_024, a_: Union[str, Any]=8, a_: Any=0.0, a_: List[str]=True, a_: str="relu", a_: int=256, a_: List[Any]=0.1, a_: Dict=0.0, a_: List[Any]=0.0, a_: Tuple=0.02, a_: List[Any]=1.0, a_: Optional[Any]=True, a_: int=False, a_: Any="sine", a_: List[str]=5, a_: Dict=4, a_: Optional[int]=4, a_: Union[str, Any]=True, a_: List[str]=300, a_: int=True, a_: List[str]=True, a_: Optional[Any]=1, a_: Any=5, a_: str=2, a_: Tuple=1, a_: Union[str, Any]=1, a_: List[str]=5, a_: Union[str, Any]=2, a_: str=0.1, a_: str=0.25, **a_: Dict, ): '''simple docstring''' if backbone_config is None: logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" ) _snake_case : List[str] = CONFIG_MAPPING["""resnet"""](out_features=["""stage2""", """stage3""", """stage4"""] ) else: if isinstance(a_, a_ ): _snake_case : Any = backbone_config.pop("""model_type""" ) _snake_case : Tuple = CONFIG_MAPPING[backbone_model_type] _snake_case : Tuple = config_class.from_dict(a_ ) _snake_case : Union[str, Any] = backbone_config _snake_case : int = num_queries _snake_case : Dict = max_position_embeddings _snake_case : Optional[int] = d_model _snake_case : int = encoder_ffn_dim _snake_case : Dict = encoder_layers _snake_case : Any = encoder_attention_heads _snake_case : List[str] = decoder_ffn_dim _snake_case : Optional[Any] = decoder_layers _snake_case : Optional[int] = decoder_attention_heads _snake_case : Dict = dropout _snake_case : Dict = attention_dropout _snake_case : int = activation_dropout _snake_case : int = activation_function _snake_case : List[str] = init_std _snake_case : Dict = init_xavier_std _snake_case : str = encoder_layerdrop _snake_case : int = auxiliary_loss _snake_case : Dict = position_embedding_type # deformable attributes _snake_case : Dict = num_feature_levels _snake_case : Optional[int] = encoder_n_points _snake_case : Union[str, Any] = decoder_n_points _snake_case : Optional[Any] = two_stage _snake_case : Dict = two_stage_num_proposals _snake_case : Optional[int] = with_box_refine _snake_case : int = assign_first_stage if two_stage is True and with_box_refine is False: raise ValueError("""If two_stage is True, with_box_refine must be True.""" ) # Hungarian matcher _snake_case : int = class_cost _snake_case : Optional[Any] = bbox_cost _snake_case : int = giou_cost # Loss coefficients _snake_case : str = mask_loss_coefficient _snake_case : Optional[Any] = dice_loss_coefficient _snake_case : Optional[int] = bbox_loss_coefficient _snake_case : Optional[Any] = giou_loss_coefficient _snake_case : Dict = eos_coefficient _snake_case : Any = focal_alpha super().__init__(is_encoder_decoder=a_, **a_ ) @property def UpperCamelCase_ ( self: int ): '''simple docstring''' return self.encoder_attention_heads @property def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' return self.d_model def UpperCamelCase_ ( self: List[Any] ): '''simple docstring''' _snake_case : Tuple = 
copy.deepcopy(self.__dict__ ) _snake_case : List[str] = self.backbone_config.to_dict() _snake_case : Tuple = self.__class__.model_type return output
64
"""simple docstring""" from .integrations import ( is_optuna_available, is_ray_available, is_sigopt_available, is_wandb_available, run_hp_search_optuna, run_hp_search_ray, run_hp_search_sigopt, run_hp_search_wandb, ) from .trainer_utils import ( HPSearchBackend, default_hp_space_optuna, default_hp_space_ray, default_hp_space_sigopt, default_hp_space_wandb, ) from .utils import logging A_ = logging.get_logger(__name__) class lowercase: '''simple docstring''' lowercase__ = 42 lowercase__ = None @staticmethod def UpperCamelCase_ ( ): '''simple docstring''' raise NotImplementedError def UpperCamelCase_ ( self: Tuple, a_: int, a_: int, a_: str, **a_: Dict ): '''simple docstring''' raise NotImplementedError def UpperCamelCase_ ( self: Union[str, Any], a_: List[str] ): '''simple docstring''' raise NotImplementedError def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' if not self.is_available(): raise RuntimeError( f"You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}." ) @classmethod def UpperCamelCase_ ( cls: Tuple ): '''simple docstring''' return f"`pip install {cls.pip_package or cls.name}`" class lowercase( __a ): '''simple docstring''' lowercase__ = "optuna" @staticmethod def UpperCamelCase_ ( ): '''simple docstring''' return is_optuna_available() def UpperCamelCase_ ( self: Union[str, Any], a_: List[Any], a_: int, a_: str, **a_: List[str] ): '''simple docstring''' return run_hp_search_optuna(a_, a_, a_, **a_ ) def UpperCamelCase_ ( self: Optional[Any], a_: Any ): '''simple docstring''' return default_hp_space_optuna(a_ ) class lowercase( __a ): '''simple docstring''' lowercase__ = "ray" lowercase__ = "'ray[tune]'" @staticmethod def UpperCamelCase_ ( ): '''simple docstring''' return is_ray_available() def UpperCamelCase_ ( self: int, a_: Optional[Any], a_: int, a_: str, **a_: List[Any] ): '''simple docstring''' return run_hp_search_ray(a_, a_, a_, **a_ ) def UpperCamelCase_ ( self: str, a_: Tuple ): '''simple docstring''' return default_hp_space_ray(a_ ) class lowercase( __a ): '''simple docstring''' lowercase__ = "sigopt" @staticmethod def UpperCamelCase_ ( ): '''simple docstring''' return is_sigopt_available() def UpperCamelCase_ ( self: Dict, a_: str, a_: int, a_: str, **a_: int ): '''simple docstring''' return run_hp_search_sigopt(a_, a_, a_, **a_ ) def UpperCamelCase_ ( self: str, a_: List[str] ): '''simple docstring''' return default_hp_space_sigopt(a_ ) class lowercase( __a ): '''simple docstring''' lowercase__ = "wandb" @staticmethod def UpperCamelCase_ ( ): '''simple docstring''' return is_wandb_available() def UpperCamelCase_ ( self: Optional[Any], a_: str, a_: int, a_: str, **a_: Union[str, Any] ): '''simple docstring''' return run_hp_search_wandb(a_, a_, a_, **a_ ) def UpperCamelCase_ ( self: str, a_: Any ): '''simple docstring''' return default_hp_space_wandb(a_ ) A_ = { HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend] } def UpperCAmelCase__ (): """simple docstring""" _snake_case : Optional[int] = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()] if len(snake_case__ ) > 0: _snake_case : Any = available_backends[0].name if len(snake_case__ ) > 1: logger.info( F"{len(snake_case__ )} hyperparameter search backends available. Using {name} as the default." 
) return name raise RuntimeError( """No hyperparameter search backend available.\n""" + """\n""".join( F" - To install {backend.name} run {backend.pip_install()}" for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
64
1
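A hedged sketch of how the backends above are reached in practice: `Trainer.hyperparameter_search` dispatches to whichever backend key you pass. `model_init` and `train_dataset` here are placeholders you must supply yourself.

from transformers import Trainer, TrainingArguments

trainer = Trainer(
    model_init=model_init,  # placeholder: a function returning a fresh model per trial
    args=TrainingArguments(output_dir="hp_search"),
    train_dataset=train_dataset,  # placeholder dataset
)

best_run = trainer.hyperparameter_search(
    direction="minimize",
    backend="optuna",  # any key of ALL_HYPERPARAMETER_SEARCH_BACKENDS
    n_trials=10,
)
print(best_run.hyperparameters)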
"""simple docstring""" import math def UpperCAmelCase__ (snake_case__ : int ): """simple docstring""" _snake_case : Dict = [True] * n _snake_case : List[str] = False _snake_case : Optional[int] = False _snake_case : Any = True for i in range(3 , int(n**0.5 + 1 ) , 2 ): _snake_case : int = i * 2 while index < n: _snake_case : str = False _snake_case : Dict = index + i _snake_case : List[str] = [2] for i in range(3 , snake_case__ , 2 ): if is_prime[i]: primes.append(snake_case__ ) return primes def UpperCAmelCase__ (snake_case__ : int = 99_99_66_66_33_33 ): """simple docstring""" _snake_case : Optional[Any] = math.floor(math.sqrt(snake_case__ ) ) + 1_00 _snake_case : List[str] = prime_sieve(snake_case__ ) _snake_case : Optional[int] = 0 _snake_case : Optional[int] = 0 _snake_case : Union[str, Any] = primes[prime_index] while (last_prime**2) <= limit: _snake_case : int = primes[prime_index + 1] _snake_case : Union[str, Any] = last_prime**2 _snake_case : str = next_prime**2 # Get numbers divisible by lps(current) _snake_case : Dict = lower_bound + last_prime while upper_bound > current <= limit: matches_sum += current current += last_prime # Reset the upper_bound while (upper_bound - next_prime) > limit: upper_bound -= next_prime # Add the numbers divisible by ups(current) _snake_case : Union[str, Any] = upper_bound - next_prime while current > lower_bound: matches_sum += current current -= next_prime # Remove the numbers divisible by both ups and lps _snake_case : Any = 0 while upper_bound > current <= limit: if current <= lower_bound: # Increment the current number current += last_prime * next_prime continue if current > limit: break # Remove twice since it was added by both ups and lps matches_sum -= current * 2 # Increment the current number current += last_prime * next_prime # Setup for next pair _snake_case : str = next_prime prime_index += 1 return matches_sum if __name__ == "__main__": print(solution())
64
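A quick sanity check for the row above. The prime list follows directly from the sieve's definition; the `solution(1000)` call simply exercises the entry point on a small limit (the result is whatever the algorithm produces, not an asserted reference value).

# First primes below 30, per the sieve above.
assert prime_sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]

# Small instance: consecutive prime pairs (p, q) are walked while p**2 <= 1000.
print(solution(1000))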
"""simple docstring""" import re import warnings from contextlib import contextmanager from ...processing_utils import ProcessorMixin class lowercase( __a ): '''simple docstring''' lowercase__ = ["image_processor", "tokenizer"] lowercase__ = "AutoImageProcessor" lowercase__ = "AutoTokenizer" def __init__( self: List[str], a_: List[str]=None, a_: Tuple=None, **a_: Tuple ): '''simple docstring''' _snake_case : str = None if "feature_extractor" in kwargs: warnings.warn( """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`""" """ instead.""", a_, ) _snake_case : str = kwargs.pop("""feature_extractor""" ) _snake_case : Union[str, Any] = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("""You need to specify an `image_processor`.""" ) if tokenizer is None: raise ValueError("""You need to specify a `tokenizer`.""" ) super().__init__(a_, a_ ) _snake_case : Dict = self.image_processor _snake_case : Any = False def __call__( self: Any, *a_: Any, **a_: Tuple ): '''simple docstring''' if self._in_target_context_manager: return self.current_processor(*a_, **a_ ) _snake_case : Dict = kwargs.pop("""images""", a_ ) _snake_case : Optional[Any] = kwargs.pop("""text""", a_ ) if len(a_ ) > 0: _snake_case : Optional[int] = args[0] _snake_case : Tuple = args[1:] if images is None and text is None: raise ValueError("""You need to specify either an `images` or `text` input to process.""" ) if images is not None: _snake_case : Tuple = self.image_processor(a_, *a_, **a_ ) if text is not None: _snake_case : Tuple = self.tokenizer(a_, **a_ ) if text is None: return inputs elif images is None: return encodings else: _snake_case : List[str] = encodings["""input_ids"""] return inputs def UpperCamelCase_ ( self: Optional[int], *a_: Tuple, **a_: List[str] ): '''simple docstring''' return self.tokenizer.batch_decode(*a_, **a_ ) def UpperCamelCase_ ( self: int, *a_: List[str], **a_: int ): '''simple docstring''' return self.tokenizer.decode(*a_, **a_ ) @contextmanager def UpperCamelCase_ ( self: Dict ): '''simple docstring''' warnings.warn( """`as_target_processor` is deprecated and will be removed in v5 of Transformers. 
You can process your """ """labels by using the argument `text` of the regular `__call__` method (either in the same call as """ """your images inputs, or in a separate call.""" ) _snake_case : Any = True _snake_case : Optional[int] = self.tokenizer yield _snake_case : int = self.image_processor _snake_case : Optional[int] = False def UpperCamelCase_ ( self: Dict, a_: Optional[Any], a_: str=False, a_: Optional[Any]=None ): '''simple docstring''' if added_vocab is None: _snake_case : Dict = self.tokenizer.get_added_vocab() _snake_case : str = {} while tokens: _snake_case : Union[str, Any] = re.search(r"""<s_(.*?)>""", a_, re.IGNORECASE ) if start_token is None: break _snake_case : List[Any] = start_token.group(1 ) _snake_case : str = re.search(rf"</s_{key}>", a_, re.IGNORECASE ) _snake_case : Dict = start_token.group() if end_token is None: _snake_case : List[Any] = tokens.replace(a_, """""" ) else: _snake_case : List[str] = end_token.group() _snake_case : str = re.escape(a_ ) _snake_case : str = re.escape(a_ ) _snake_case : Union[str, Any] = re.search(f"{start_token_escaped}(.*?){end_token_escaped}", a_, re.IGNORECASE ) if content is not None: _snake_case : int = content.group(1 ).strip() if r"<s_" in content and r"</s_" in content: # non-leaf node _snake_case : List[Any] = self.tokenajson(a_, is_inner_value=a_, added_vocab=a_ ) if value: if len(a_ ) == 1: _snake_case : List[str] = value[0] _snake_case : List[str] = value else: # leaf nodes _snake_case : Tuple = [] for leaf in content.split(r"""<sep/>""" ): _snake_case : Tuple = leaf.strip() if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>": _snake_case : int = leaf[1:-2] # for categorical special tokens output[key].append(a_ ) if len(output[key] ) == 1: _snake_case : int = output[key][0] _snake_case : Any = tokens[tokens.find(a_ ) + len(a_ ) :].strip() if tokens[:6] == r"<sep/>": # non-leaf nodes return [output] + self.tokenajson(tokens[6:], is_inner_value=a_, added_vocab=a_ ) if len(a_ ): return [output] if is_inner_value else output else: return [] if is_inner_value else {"text_sequence": tokens} @property def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' warnings.warn( """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""", a_, ) return self.image_processor_class @property def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' warnings.warn( """`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""", a_, ) return self.image_processor
64
1
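A small sketch of the `token2json` post-processing above. The checkpoint name is an assumption (any Donut-style processor works) and the token string is a made-up example; the nesting of `<s_key>...</s_key>` spans maps directly onto nested dicts.

from transformers import DonutProcessor

processor = DonutProcessor.from_pretrained("naver-clova-ix/donut-base-finetuned-cord-v2")

# Nested <s_key>...</s_key> spans become nested dicts; <sep/> splits list items.
sequence = "<s_menu><s_nm>Latte</s_nm><s_price>4.50</s_price></s_menu>"
print(processor.token2json(sequence))
# {'menu': {'nm': 'Latte', 'price': '4.50'}}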
"""simple docstring""" from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import ( VersatileDiffusionDualGuidedPipeline, VersatileDiffusionImageVariationPipeline, VersatileDiffusionPipeline, VersatileDiffusionTextToImagePipeline, ) else: from .modeling_text_unet import UNetFlatConditionModel from .pipeline_versatile_diffusion import VersatileDiffusionPipeline from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
64
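A hedged loading sketch for the pipelines re-exported above, assuming the public `shi-labs/versatile-diffusion` checkpoint and a CUDA device; this is illustrative usage, not part of the module itself.

import torch
from diffusers import VersatileDiffusionTextToImagePipeline

pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(
    "shi-labs/versatile-diffusion", torch_dtype=torch.float16
)
pipe = pipe.to("cuda")

image = pipe("an astronaut riding a horse on mars").images[0]
image.save("astronaut.png")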
"""simple docstring""" from __future__ import annotations def UpperCAmelCase__ (snake_case__ : list[float] ): """simple docstring""" _snake_case : int = 0.00 _snake_case : int = 0 for resistor in resistors: if resistor <= 0: _snake_case : Dict = F"Resistor at index {index} has a negative or zero value!" raise ValueError(snake_case__ ) first_sum += 1 / float(snake_case__ ) index += 1 return 1 / first_sum def UpperCAmelCase__ (snake_case__ : list[float] ): """simple docstring""" _snake_case : Union[str, Any] = 0.00 _snake_case : Any = 0 for resistor in resistors: sum_r += resistor if resistor < 0: _snake_case : Any = F"Resistor at index {index} has a negative value!" raise ValueError(snake_case__ ) index += 1 return sum_r if __name__ == "__main__": import doctest doctest.testmod()
64
1
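A short numeric check of the two helpers above (using the function names as restored in the row): two 2 Ω resistors give 1 Ω in parallel and 4 Ω in series, and invalid inputs raise a ValueError naming the offending index.

assert resistor_parallel([2.0, 2.0]) == 1.0
assert resistor_series([2.0, 2.0]) == 4.0

try:
    resistor_parallel([2.0, 0.0])
except ValueError as exc:
    print(exc)  # Resistor at index 1 has a negative or zero value!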
"""simple docstring""" import os import sys from contextlib import contextmanager # Windows only if os.name == "nt": import ctypes import msvcrt # noqa class lowercase( ctypes.Structure ): '''simple docstring''' lowercase__ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)] def UpperCAmelCase__ (): """simple docstring""" if os.name == "nt": _snake_case : List[Any] = CursorInfo() _snake_case : int = ctypes.windll.kernelaa.GetStdHandle(-11 ) ctypes.windll.kernelaa.GetConsoleCursorInfo(snake_case__ , ctypes.byref(snake_case__ ) ) _snake_case : Any = False ctypes.windll.kernelaa.SetConsoleCursorInfo(snake_case__ , ctypes.byref(snake_case__ ) ) elif os.name == "posix": sys.stdout.write("""\033[?25l""" ) sys.stdout.flush() def UpperCAmelCase__ (): """simple docstring""" if os.name == "nt": _snake_case : List[Any] = CursorInfo() _snake_case : Union[str, Any] = ctypes.windll.kernelaa.GetStdHandle(-11 ) ctypes.windll.kernelaa.GetConsoleCursorInfo(snake_case__ , ctypes.byref(snake_case__ ) ) _snake_case : List[str] = True ctypes.windll.kernelaa.SetConsoleCursorInfo(snake_case__ , ctypes.byref(snake_case__ ) ) elif os.name == "posix": sys.stdout.write("""\033[?25h""" ) sys.stdout.flush() @contextmanager def UpperCAmelCase__ (): """simple docstring""" try: hide_cursor() yield finally: show_cursor()
64
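A usage sketch for the context manager above (named `hide` in the reconstruction; the original name is not visible in this row). Because the restore happens in a `finally` block, the cursor comes back even if the body raises.

import time

with hide():
    time.sleep(2)  # cursor is hidden while this runs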
"""simple docstring""" import json import re from typing import TYPE_CHECKING, List, Optional, Tuple, Union import numpy as np from ...utils import is_tf_available, is_torch_available, logging if TYPE_CHECKING: if is_torch_available(): import torch if is_tf_available(): import tensorflow as tf from tokenizers import pre_tokenizers from ...tokenization_utils_base import BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from .tokenization_codegen import CodeGenTokenizer A_ = logging.get_logger(__name__) A_ = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''} A_ = { '''vocab_file''': { '''Salesforce/codegen-350M-mono''': '''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json''', }, '''merges_file''': { '''Salesforce/codegen-350M-mono''': '''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt''', }, '''tokenizer_file''': { '''Salesforce/codegen-350M-mono''': ( '''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json''' ), }, } A_ = { '''Salesforce/codegen-350M-mono''': 20_48, } class lowercase( __a ): '''simple docstring''' lowercase__ = VOCAB_FILES_NAMES lowercase__ = PRETRAINED_VOCAB_FILES_MAP lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase__ = ["input_ids", "attention_mask"] lowercase__ = CodeGenTokenizer def __init__( self: Union[str, Any], a_: List[Any]=None, a_: str=None, a_: str=None, a_: Dict="<|endoftext|>", a_: Tuple="<|endoftext|>", a_: str="<|endoftext|>", a_: List[Any]=False, **a_: List[str], ): '''simple docstring''' super().__init__( a_, a_, tokenizer_file=a_, unk_token=a_, bos_token=a_, eos_token=a_, add_prefix_space=a_, **a_, ) if kwargs.pop("""add_bos_token""", a_ ): _snake_case : str = kwargs.pop("""name_or_path""", """""" ) raise ValueError( """Currenty GPT2's fast tokenizer does NOT support adding a BOS token.""" """Instead you should use GPT2's slow tokenizer class `CodeGenTokenizer` as follows: \n""" f"`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n" f"`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n" """This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005.""" """ so that the fast tokenizer works correctly.""" ) _snake_case : Tuple = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("""add_prefix_space""", a_ ) != add_prefix_space: _snake_case : Dict = getattr(a_, pre_tok_state.pop("""type""" ) ) _snake_case : Dict = add_prefix_space _snake_case : str = pre_tok_class(**a_ ) _snake_case : List[Any] = add_prefix_space def UpperCamelCase_ ( self: Any, *a_: Any, **a_: int ): '''simple docstring''' _snake_case : Optional[int] = kwargs.get("""is_split_into_words""", a_ ) assert self.add_prefix_space or not is_split_into_words, ( f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True " "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*a_, **a_ ) def UpperCamelCase_ ( self: Optional[Any], *a_: Any, **a_: List[str] ): '''simple docstring''' _snake_case : Dict = kwargs.get("""is_split_into_words""", a_ ) assert self.add_prefix_space or not is_split_into_words, ( f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True " "to use it with pretokenized inputs." 
) return super()._encode_plus(*a_, **a_ ) def UpperCamelCase_ ( self: Optional[int], a_: str, a_: Optional[str] = None ): '''simple docstring''' _snake_case : List[Any] = self._tokenizer.model.save(a_, name=a_ ) return tuple(a_ ) def UpperCamelCase_ ( self: str, a_: Union[int, List[int], "np.ndarray", "torch.Tensor", "tf.Tensor"], a_: bool = False, a_: bool = None, a_: Optional[List[str]] = None, **a_: List[str], ): '''simple docstring''' _snake_case : Any = super().decode( token_ids=a_, skip_special_tokens=a_, clean_up_tokenization_spaces=a_, **a_, ) if truncate_before_pattern is not None and len(a_ ) > 0: _snake_case : List[str] = self.truncate(a_, a_ ) return decoded_text def UpperCamelCase_ ( self: Dict, a_: Tuple, a_: Optional[Any] ): '''simple docstring''' def find_re(a_: Dict, a_: str, a_: Union[str, Any] ): _snake_case : Any = pattern.search(a_, a_ ) return m.start() if m else -1 _snake_case : Tuple = [re.compile(a_, re.MULTILINE ) for pattern in truncate_before_pattern] _snake_case : List[Any] = list(re.finditer("""^print""", a_, re.MULTILINE ) ) if len(a_ ) > 1: _snake_case : int = completion[: prints[1].start()] _snake_case : List[str] = list(re.finditer("""^def""", a_, re.MULTILINE ) ) if len(a_ ) > 1: _snake_case : List[Any] = completion[: defs[1].start()] _snake_case : int = 0 _snake_case : List[Any] = [ pos for pos in [find_re(a_, a_, a_ ) for terminal in terminals] if pos != -1 ] if len(a_ ) > 0: return completion[: min(a_ )] else: return completion
64
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available A_ = { '''configuration_m2m_100''': ['''M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''M2M100Config''', '''M2M100OnnxConfig'''], '''tokenization_m2m_100''': ['''M2M100Tokenizer'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ = [ '''M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST''', '''M2M100ForConditionalGeneration''', '''M2M100Model''', '''M2M100PreTrainedModel''', ] if TYPE_CHECKING: from .configuration_mam_aaa import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, MaMaaaConfig, MaMaaaOnnxConfig from .tokenization_mam_aaa import MaMaaaTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mam_aaa import ( M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST, MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaPreTrainedModel, ) else: import sys A_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
64
"""simple docstring""" import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor from transformers.utils import logging logging.set_verbosity_info() A_ = logging.get_logger(__name__) def UpperCAmelCase__ (snake_case__ : str ): """simple docstring""" _snake_case : List[Any] = YolosConfig() # size of the architecture if "yolos_ti" in yolos_name: _snake_case : Tuple = 1_92 _snake_case : Any = 7_68 _snake_case : Any = 12 _snake_case : List[Any] = 3 _snake_case : int = [8_00, 13_33] _snake_case : Tuple = False elif yolos_name == "yolos_s_dWr": _snake_case : Tuple = 3_30 _snake_case : List[str] = 14 _snake_case : List[str] = 6 _snake_case : Union[str, Any] = 13_20 elif "yolos_s" in yolos_name: _snake_case : Union[str, Any] = 3_84 _snake_case : List[str] = 15_36 _snake_case : Any = 12 _snake_case : Optional[int] = 6 elif "yolos_b" in yolos_name: _snake_case : Dict = [8_00, 13_44] _snake_case : str = 91 _snake_case : Optional[Any] = """huggingface/label-files""" _snake_case : str = """coco-detection-id2label.json""" _snake_case : str = json.load(open(hf_hub_download(snake_case__ , snake_case__ , repo_type="""dataset""" ) , """r""" ) ) _snake_case : Union[str, Any] = {int(snake_case__ ): v for k, v in idalabel.items()} _snake_case : List[str] = idalabel _snake_case : List[str] = {v: k for k, v in idalabel.items()} return config def UpperCAmelCase__ (snake_case__ : dict , snake_case__ : YolosConfig , snake_case__ : bool = False ): """simple docstring""" for i in range(config.num_hidden_layers ): # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) _snake_case : int = state_dict.pop(F"blocks.{i}.attn.qkv.weight" ) _snake_case : Union[str, Any] = state_dict.pop(F"blocks.{i}.attn.qkv.bias" ) # next, add query, keys and values (in that order) to the state dict _snake_case : Any = in_proj_weight[: config.hidden_size, :] _snake_case : Optional[Any] = in_proj_bias[: config.hidden_size] _snake_case : Optional[int] = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] _snake_case : int = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] _snake_case : Tuple = in_proj_weight[-config.hidden_size :, :] _snake_case : List[Any] = in_proj_bias[-config.hidden_size :] def UpperCAmelCase__ (snake_case__ : str ): """simple docstring""" if "backbone" in name: _snake_case : str = name.replace("""backbone""" , """vit""" ) if "cls_token" in name: _snake_case : Union[str, Any] = name.replace("""cls_token""" , """embeddings.cls_token""" ) if "det_token" in name: _snake_case : str = name.replace("""det_token""" , """embeddings.detection_tokens""" ) if "mid_pos_embed" in name: _snake_case : str = name.replace("""mid_pos_embed""" , """encoder.mid_position_embeddings""" ) if "pos_embed" in name: _snake_case : Tuple = name.replace("""pos_embed""" , """embeddings.position_embeddings""" ) if "patch_embed.proj" in name: _snake_case : str = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" ) if "blocks" in name: _snake_case : str = name.replace("""blocks""" , """encoder.layer""" ) if "attn.proj" in name: _snake_case : Any = name.replace("""attn.proj""" , """attention.output.dense""" ) if "attn" in name: _snake_case : str = name.replace("""attn""" , """attention.self""" ) if "norm1" in name: _snake_case : List[str] = name.replace("""norm1""" , """layernorm_before""" ) if 
"norm2" in name: _snake_case : str = name.replace("""norm2""" , """layernorm_after""" ) if "mlp.fc1" in name: _snake_case : List[str] = name.replace("""mlp.fc1""" , """intermediate.dense""" ) if "mlp.fc2" in name: _snake_case : int = name.replace("""mlp.fc2""" , """output.dense""" ) if "class_embed" in name: _snake_case : Union[str, Any] = name.replace("""class_embed""" , """class_labels_classifier""" ) if "bbox_embed" in name: _snake_case : str = name.replace("""bbox_embed""" , """bbox_predictor""" ) if "vit.norm" in name: _snake_case : Union[str, Any] = name.replace("""vit.norm""" , """vit.layernorm""" ) return name def UpperCAmelCase__ (snake_case__ : dict , snake_case__ : YolosForObjectDetection ): """simple docstring""" for key in orig_state_dict.copy().keys(): _snake_case : List[str] = orig_state_dict.pop(snake_case__ ) if "qkv" in key: _snake_case : Optional[Any] = key.split(""".""" ) _snake_case : Optional[Any] = int(key_split[2] ) _snake_case : Optional[int] = model.vit.encoder.layer[layer_num].attention.attention.all_head_size if "weight" in key: _snake_case : str = val[:dim, :] _snake_case : Optional[Any] = val[ dim : dim * 2, : ] _snake_case : Optional[Any] = val[-dim:, :] else: _snake_case : Dict = val[:dim] _snake_case : Any = val[dim : dim * 2] _snake_case : Dict = val[-dim:] else: _snake_case : Tuple = val return orig_state_dict def UpperCAmelCase__ (): """simple docstring""" _snake_case : str = """http://images.cocodataset.org/val2017/000000039769.jpg""" _snake_case : Union[str, Any] = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw ) return im @torch.no_grad() def UpperCAmelCase__ (snake_case__ : str , snake_case__ : str , snake_case__ : str , snake_case__ : bool = False ): """simple docstring""" _snake_case : Optional[Any] = get_yolos_config(snake_case__ ) # load original state_dict _snake_case : Optional[int] = torch.load(snake_case__ , map_location="""cpu""" )["""model"""] # load 🤗 model _snake_case : Optional[Any] = YolosForObjectDetection(snake_case__ ) model.eval() _snake_case : Optional[Any] = convert_state_dict(snake_case__ , snake_case__ ) model.load_state_dict(snake_case__ ) # Check outputs on an image, prepared by YolosImageProcessor _snake_case : List[str] = 8_00 if yolos_name != """yolos_ti""" else 5_12 _snake_case : Optional[int] = YolosImageProcessor(format="""coco_detection""" , size=snake_case__ ) _snake_case : Optional[Any] = image_processor(images=prepare_img() , return_tensors="""pt""" ) _snake_case : Optional[Any] = model(**snake_case__ ) _snake_case , _snake_case : Optional[int] = outputs.logits, outputs.pred_boxes _snake_case , _snake_case : Dict = None, None if yolos_name == "yolos_ti": _snake_case : Optional[Any] = torch.tensor( [[-39.50_22, -11.98_20, -17.68_88], [-29.95_74, -9.97_69, -17.76_91], [-42.32_81, -20.72_00, -30.62_94]] ) _snake_case : Tuple = torch.tensor( [[0.40_21, 0.08_36, 0.79_79], [0.01_84, 0.26_09, 0.03_64], [0.17_81, 0.20_04, 0.20_95]] ) elif yolos_name == "yolos_s_200_pre": _snake_case : List[str] = torch.tensor( [[-24.02_48, -10.30_24, -14.82_90], [-42.03_92, -16.82_00, -27.43_34], [-27.27_43, -11.81_54, -18.71_48]] ) _snake_case : List[str] = torch.tensor( [[0.25_59, 0.54_55, 0.47_06], [0.29_89, 0.72_79, 0.18_75], [0.77_32, 0.40_17, 0.44_62]] ) elif yolos_name == "yolos_s_300_pre": _snake_case : Dict = torch.tensor( [[-36.22_20, -14.43_85, -23.54_57], [-35.69_70, -14.75_83, -21.39_35], [-31.59_39, -13.60_42, -16.80_49]] ) _snake_case : Union[str, Any] = torch.tensor( [[0.76_14, 0.23_16, 0.47_28], [0.71_68, 
0.44_95, 0.38_55], [0.49_96, 0.14_66, 0.99_96]] ) elif yolos_name == "yolos_s_dWr": _snake_case : Tuple = torch.tensor( [[-42.86_68, -24.10_49, -41.16_90], [-34.74_56, -14.12_74, -24.91_94], [-33.78_98, -12.19_46, -25.64_95]] ) _snake_case : Optional[Any] = torch.tensor( [[0.55_87, 0.27_73, 0.06_05], [0.50_04, 0.30_14, 0.99_94], [0.49_99, 0.15_48, 0.99_94]] ) elif yolos_name == "yolos_base": _snake_case : int = torch.tensor( [[-40.60_64, -24.30_84, -32.64_47], [-55.19_90, -30.77_19, -35.58_77], [-51.43_11, -33.35_07, -35.64_62]] ) _snake_case : Optional[int] = torch.tensor( [[0.55_55, 0.27_94, 0.06_55], [0.90_49, 0.26_64, 0.18_94], [0.91_83, 0.19_84, 0.16_35]] ) else: raise ValueError(F"Unknown yolos_name: {yolos_name}" ) assert torch.allclose(logits[0, :3, :3] , snake_case__ , atol=1e-4 ) assert torch.allclose(pred_boxes[0, :3, :3] , snake_case__ , atol=1e-4 ) Path(snake_case__ ).mkdir(exist_ok=snake_case__ ) print(F"Saving model {yolos_name} to {pytorch_dump_folder_path}" ) model.save_pretrained(snake_case__ ) print(F"Saving image processor to {pytorch_dump_folder_path}" ) image_processor.save_pretrained(snake_case__ ) if push_to_hub: _snake_case : Dict = { """yolos_ti""": """yolos-tiny""", """yolos_s_200_pre""": """yolos-small""", """yolos_s_300_pre""": """yolos-small-300""", """yolos_s_dWr""": """yolos-small-dwr""", """yolos_base""": """yolos-base""", } print("""Pushing to the hub...""" ) _snake_case : str = model_mapping[yolos_name] image_processor.push_to_hub(snake_case__ , organization="""hustvl""" ) model.push_to_hub(snake_case__ , organization="""hustvl""" ) if __name__ == "__main__": A_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--yolos_name''', default='''yolos_s_200_pre''', type=str, help=( '''Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\',''' ''' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.''' ), ) parser.add_argument( '''--checkpoint_path''', default=None, type=str, help='''Path to the original state dict (.pth file).''' ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.''' ) A_ = parser.parse_args() convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
64
1
"""simple docstring""" import warnings from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class lowercase( __a ): '''simple docstring''' lowercase__ = ["image_processor", "tokenizer"] lowercase__ = "LayoutLMv2ImageProcessor" lowercase__ = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast") def __init__( self: Optional[int], a_: Union[str, Any]=None, a_: Union[str, Any]=None, **a_: str ): '''simple docstring''' if "feature_extractor" in kwargs: warnings.warn( """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`""" """ instead.""", a_, ) _snake_case : List[Any] = kwargs.pop("""feature_extractor""" ) _snake_case : Tuple = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("""You need to specify an `image_processor`.""" ) if tokenizer is None: raise ValueError("""You need to specify a `tokenizer`.""" ) super().__init__(a_, a_ ) def __call__( self: List[Any], a_: Union[str, Any], a_: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, a_: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None, a_: Union[List[List[int]], List[List[List[int]]]] = None, a_: Optional[Union[List[int], List[List[int]]]] = None, a_: bool = True, a_: Union[bool, str, PaddingStrategy] = False, a_: Union[bool, str, TruncationStrategy] = None, a_: Optional[int] = None, a_: int = 0, a_: Optional[int] = None, a_: Optional[bool] = None, a_: Optional[bool] = None, a_: bool = False, a_: bool = False, a_: bool = False, a_: bool = False, a_: bool = True, a_: Optional[Union[str, TensorType]] = None, **a_: List[Any], ): '''simple docstring''' if self.image_processor.apply_ocr and (boxes is not None): raise ValueError( """You cannot provide bounding boxes """ """if you initialized the image processor with apply_ocr set to True.""" ) if self.image_processor.apply_ocr and (word_labels is not None): raise ValueError( """You cannot provide word labels if you initialized the image processor with apply_ocr set to True.""" ) if return_overflowing_tokens is True and return_offsets_mapping is False: raise ValueError("""You cannot return overflowing tokens without returning the offsets mapping.""" ) # first, apply the image processor _snake_case : List[Any] = self.image_processor(images=a_, return_tensors=a_ ) # second, apply the tokenizer if text is not None and self.image_processor.apply_ocr and text_pair is None: if isinstance(a_, a_ ): _snake_case : Union[str, Any] = [text] # add batch dimension (as the image processor always adds a batch dimension) _snake_case : Union[str, Any] = features["""words"""] _snake_case : List[Any] = self.tokenizer( text=text if text is not None else features["""words"""], text_pair=text_pair if text_pair is not None else None, boxes=boxes if boxes is not None else features["""boxes"""], word_labels=a_, add_special_tokens=a_, padding=a_, truncation=a_, max_length=a_, stride=a_, pad_to_multiple_of=a_, return_token_type_ids=a_, return_attention_mask=a_, return_overflowing_tokens=a_, return_special_tokens_mask=a_, return_offsets_mapping=a_, return_length=a_, verbose=a_, return_tensors=a_, **a_, ) # add pixel values _snake_case : int = features.pop("""pixel_values""" ) if return_overflowing_tokens is True: _snake_case : int = self.get_overflowing_images(a_, 
encoded_inputs["""overflow_to_sample_mapping"""] ) _snake_case : Optional[int] = images return encoded_inputs def UpperCamelCase_ ( self: List[Any], a_: List[Any], a_: Union[str, Any] ): '''simple docstring''' _snake_case : Optional[int] = [] for sample_idx in overflow_to_sample_mapping: images_with_overflow.append(images[sample_idx] ) if len(a_ ) != len(a_ ): raise ValueError( """Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got""" f" {len(a_ )} and {len(a_ )}" ) return images_with_overflow def UpperCamelCase_ ( self: Union[str, Any], *a_: List[str], **a_: Dict ): '''simple docstring''' return self.tokenizer.batch_decode(*a_, **a_ ) def UpperCamelCase_ ( self: List[str], *a_: Tuple, **a_: Dict ): '''simple docstring''' return self.tokenizer.decode(*a_, **a_ ) @property def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' return ["input_ids", "bbox", "attention_mask", "image"] @property def UpperCamelCase_ ( self: int ): '''simple docstring''' warnings.warn( """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""", a_, ) return self.image_processor_class @property def UpperCamelCase_ ( self: Dict ): '''simple docstring''' warnings.warn( """`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""", a_, ) return self.image_processor
64
"""simple docstring""" import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD torch.set_grad_enabled(False) def UpperCAmelCase__ (snake_case__ : str , snake_case__ : List[str]=False ): """simple docstring""" _snake_case : Optional[Any] = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((F"module.blocks.{i}.norm1.weight", F"vit.encoder.layer.{i}.layernorm_before.weight") ) rename_keys.append((F"module.blocks.{i}.norm1.bias", F"vit.encoder.layer.{i}.layernorm_before.bias") ) rename_keys.append( (F"module.blocks.{i}.attn.proj.weight", F"vit.encoder.layer.{i}.attention.output.dense.weight") ) rename_keys.append((F"module.blocks.{i}.attn.proj.bias", F"vit.encoder.layer.{i}.attention.output.dense.bias") ) rename_keys.append((F"module.blocks.{i}.norm2.weight", F"vit.encoder.layer.{i}.layernorm_after.weight") ) rename_keys.append((F"module.blocks.{i}.norm2.bias", F"vit.encoder.layer.{i}.layernorm_after.bias") ) rename_keys.append((F"module.blocks.{i}.mlp.fc1.weight", F"vit.encoder.layer.{i}.intermediate.dense.weight") ) rename_keys.append((F"module.blocks.{i}.mlp.fc1.bias", F"vit.encoder.layer.{i}.intermediate.dense.bias") ) rename_keys.append((F"module.blocks.{i}.mlp.fc2.weight", F"vit.encoder.layer.{i}.output.dense.weight") ) rename_keys.append((F"module.blocks.{i}.mlp.fc2.bias", F"vit.encoder.layer.{i}.output.dense.bias") ) # projection layer + position embeddings rename_keys.extend( [ ("""module.cls_token""", """vit.embeddings.cls_token"""), ("""module.patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""), ("""module.patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""), ("""module.pos_embed""", """vit.embeddings.position_embeddings"""), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ("""module.norm.weight""", """layernorm.weight"""), ("""module.norm.bias""", """layernorm.bias"""), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" _snake_case : Any = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ("""norm.weight""", """vit.layernorm.weight"""), ("""norm.bias""", """vit.layernorm.bias"""), ("""head.weight""", """classifier.weight"""), ("""head.bias""", """classifier.bias"""), ] ) return rename_keys def UpperCAmelCase__ (snake_case__ : Dict , snake_case__ : Dict , snake_case__ : List[str]=False ): """simple docstring""" for i in range(config.num_hidden_layers ): if base_model: _snake_case : List[Any] = """""" else: _snake_case : List[Any] = """vit.""" # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) _snake_case : Optional[Any] = state_dict.pop(F"module.blocks.{i}.attn.qkv.weight" ) _snake_case : Optional[Any] = state_dict.pop(F"module.blocks.{i}.attn.qkv.bias" ) # next, add query, keys and values (in that order) to the state dict _snake_case : Optional[Any] = in_proj_weight[ : config.hidden_size, : ] _snake_case : Union[str, Any] = in_proj_bias[: config.hidden_size] _snake_case : Union[str, Any] = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] _snake_case : Optional[Any] = in_proj_bias[ config.hidden_size 
: config.hidden_size * 2 ] _snake_case : Union[str, Any] = in_proj_weight[ -config.hidden_size :, : ] _snake_case : List[str] = in_proj_bias[-config.hidden_size :] def UpperCAmelCase__ (snake_case__ : str ): """simple docstring""" _snake_case : Tuple = ["""head.weight""", """head.bias"""] for k in ignore_keys: state_dict.pop(snake_case__ , snake_case__ ) def UpperCAmelCase__ (snake_case__ : int ): """simple docstring""" _snake_case : List[str] = [ """module.fc.fc1.weight""", """module.fc.fc1.bias""", """module.fc.bn1.weight""", """module.fc.bn1.bias""", """module.fc.bn1.running_mean""", """module.fc.bn1.running_var""", """module.fc.bn1.num_batches_tracked""", """module.fc.fc2.weight""", """module.fc.fc2.bias""", """module.fc.bn2.weight""", """module.fc.bn2.bias""", """module.fc.bn2.running_mean""", """module.fc.bn2.running_var""", """module.fc.bn2.num_batches_tracked""", """module.fc.fc3.weight""", """module.fc.fc3.bias""", ] for k in ignore_keys: state_dict.pop(snake_case__ , snake_case__ ) def UpperCAmelCase__ (snake_case__ : List[Any] , snake_case__ : Tuple , snake_case__ : int ): """simple docstring""" _snake_case : Optional[Any] = dct.pop(snake_case__ ) _snake_case : Union[str, Any] = val def UpperCAmelCase__ (snake_case__ : List[Any] , snake_case__ : str ): """simple docstring""" _snake_case : str = ViTMSNConfig() _snake_case : Any = 10_00 _snake_case : Tuple = """datasets/huggingface/label-files""" _snake_case : Dict = """imagenet-1k-id2label.json""" _snake_case : int = json.load(open(hf_hub_download(snake_case__ , snake_case__ ) , """r""" ) ) _snake_case : Any = {int(snake_case__ ): v for k, v in idalabel.items()} _snake_case : List[Any] = idalabel _snake_case : str = {v: k for k, v in idalabel.items()} if "s16" in checkpoint_url: _snake_case : Tuple = 3_84 _snake_case : Dict = 15_36 _snake_case : Tuple = 6 elif "l16" in checkpoint_url: _snake_case : Any = 10_24 _snake_case : int = 40_96 _snake_case : str = 24 _snake_case : Optional[int] = 16 _snake_case : List[Any] = 0.1 elif "b4" in checkpoint_url: _snake_case : Tuple = 4 elif "l7" in checkpoint_url: _snake_case : int = 7 _snake_case : Dict = 10_24 _snake_case : Optional[Any] = 40_96 _snake_case : Any = 24 _snake_case : Union[str, Any] = 16 _snake_case : Optional[int] = 0.1 _snake_case : int = ViTMSNModel(snake_case__ ) _snake_case : Optional[int] = torch.hub.load_state_dict_from_url(snake_case__ , map_location="""cpu""" )["""target_encoder"""] _snake_case : List[str] = ViTImageProcessor(size=config.image_size ) remove_projection_head(snake_case__ ) _snake_case : List[str] = create_rename_keys(snake_case__ , base_model=snake_case__ ) for src, dest in rename_keys: rename_key(snake_case__ , snake_case__ , snake_case__ ) read_in_q_k_v(snake_case__ , snake_case__ , base_model=snake_case__ ) model.load_state_dict(snake_case__ ) model.eval() _snake_case : Union[str, Any] = """http://images.cocodataset.org/val2017/000000039769.jpg""" _snake_case : Tuple = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw ) _snake_case : str = ViTImageProcessor( size=config.image_size , image_mean=snake_case__ , image_std=snake_case__ ) _snake_case : Any = image_processor(images=snake_case__ , return_tensors="""pt""" ) # forward pass torch.manual_seed(2 ) _snake_case : int = model(**snake_case__ ) _snake_case : List[Any] = outputs.last_hidden_state # The following Colab Notebook was used to generate these outputs: # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb if "s16" in 
checkpoint_url: _snake_case : Optional[Any] = torch.tensor([[-1.09_15, -1.48_76, -1.18_09]] ) elif "b16" in checkpoint_url: _snake_case : str = torch.tensor([[14.28_89, -18.90_45, 11.72_81]] ) elif "l16" in checkpoint_url: _snake_case : Optional[int] = torch.tensor([[41.50_28, -22.86_81, 45.64_75]] ) elif "b4" in checkpoint_url: _snake_case : List[Any] = torch.tensor([[-4.38_68, 5.29_32, -0.41_37]] ) else: _snake_case : Optional[int] = torch.tensor([[-0.17_92, -0.64_65, 2.42_63]] ) # verify logits assert torch.allclose(last_hidden_state[:, 0, :3] , snake_case__ , atol=1e-4 ) print(F"Saving model to {pytorch_dump_folder_path}" ) model.save_pretrained(snake_case__ ) print(F"Saving image processor to {pytorch_dump_folder_path}" ) image_processor.save_pretrained(snake_case__ ) if __name__ == "__main__": A_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--checkpoint_url''', default='''https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar''', type=str, help='''URL of the checkpoint you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) A_ = parser.parse_args() convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
64
1
"""simple docstring""" import random import unittest import torch from diffusers import IFInpaintingSuperResolutionPipeline from diffusers.utils import floats_tensor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import skip_mps, torch_device from ..pipeline_params import ( TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin from . import IFPipelineTesterMixin @skip_mps class lowercase( __a , __a , unittest.TestCase ): '''simple docstring''' lowercase__ = IFInpaintingSuperResolutionPipeline lowercase__ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"} lowercase__ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"} ) lowercase__ = PipelineTesterMixin.required_optional_params - {"latents"} def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' return self._get_superresolution_dummy_components() def UpperCamelCase_ ( self: str, a_: str, a_: Tuple=0 ): '''simple docstring''' if str(a_ ).startswith("""mps""" ): _snake_case : Union[str, Any] = torch.manual_seed(a_ ) else: _snake_case : Optional[Any] = torch.Generator(device=a_ ).manual_seed(a_ ) _snake_case : Optional[Any] = floats_tensor((1, 3, 16, 16), rng=random.Random(a_ ) ).to(a_ ) _snake_case : Any = floats_tensor((1, 3, 32, 32), rng=random.Random(a_ ) ).to(a_ ) _snake_case : Tuple = floats_tensor((1, 3, 32, 32), rng=random.Random(a_ ) ).to(a_ ) _snake_case : List[str] = { """prompt""": """A painting of a squirrel eating a burger""", """image""": image, """original_image""": original_image, """mask_image""": mask_image, """generator""": generator, """num_inference_steps""": 2, """output_type""": """numpy""", } return inputs @unittest.skipIf( torch_device != """cuda""" or not is_xformers_available(), reason="""XFormers attention is only available with CUDA and `xformers` installed""", ) def UpperCamelCase_ ( self: Dict ): '''simple docstring''' self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' self._test_save_load_optional_components() @unittest.skipIf(torch_device != """cuda""", reason="""float16 requires CUDA""" ) def UpperCamelCase_ ( self: str ): '''simple docstring''' super().test_save_load_floataa(expected_max_diff=1E-1 ) def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 ) def UpperCamelCase_ ( self: Dict ): '''simple docstring''' self._test_save_load_local() def UpperCamelCase_ ( self: int ): '''simple docstring''' self._test_inference_batch_single_identical( expected_max_diff=1E-2, )
64
"""simple docstring""" from __future__ import annotations from collections.abc import Sequence from typing import Literal def UpperCAmelCase__ (snake_case__ : str , snake_case__ : str ): """simple docstring""" _snake_case : Optional[Any] = list(snake_case__ ) _snake_case : List[Any] = list(snake_case__ ) _snake_case : List[Any] = 0 for i in range(len(snake_case__ ) ): if lista[i] != lista[i]: count += 1 _snake_case : Any = """_""" if count > 1: return False else: return "".join(snake_case__ ) def UpperCAmelCase__ (snake_case__ : list[str] ): """simple docstring""" _snake_case : int = [] while True: _snake_case : Union[str, Any] = ["""$"""] * len(snake_case__ ) _snake_case : int = [] for i in range(len(snake_case__ ) ): for j in range(i + 1 , len(snake_case__ ) ): _snake_case : List[Any] = compare_string(binary[i] , binary[j] ) if k is False: _snake_case : Dict = """*""" _snake_case : List[Any] = """*""" temp.append("""X""" ) for i in range(len(snake_case__ ) ): if checka[i] == "$": pi.append(binary[i] ) if len(snake_case__ ) == 0: return pi _snake_case : Optional[int] = list(set(snake_case__ ) ) def UpperCAmelCase__ (snake_case__ : int , snake_case__ : Sequence[float] ): """simple docstring""" _snake_case : Optional[int] = [] for minterm in minterms: _snake_case : Any = """""" for _ in range(snake_case__ ): _snake_case : Optional[Any] = str(minterm % 2 ) + string minterm //= 2 temp.append(snake_case__ ) return temp def UpperCAmelCase__ (snake_case__ : str , snake_case__ : str , snake_case__ : int ): """simple docstring""" _snake_case : Dict = list(snake_case__ ) _snake_case : List[str] = list(snake_case__ ) _snake_case : Tuple = 0 for i in range(len(snake_case__ ) ): if lista[i] != lista[i]: count_n += 1 return count_n == count def UpperCAmelCase__ (snake_case__ : list[list[int]] , snake_case__ : list[str] ): """simple docstring""" _snake_case : Any = [] _snake_case : Union[str, Any] = [0] * len(snake_case__ ) for i in range(len(chart[0] ) ): _snake_case : Tuple = 0 _snake_case : str = -1 for j in range(len(snake_case__ ) ): if chart[j][i] == 1: count += 1 _snake_case : Union[str, Any] = j if count == 1: _snake_case : Union[str, Any] = 1 for i in range(len(snake_case__ ) ): if select[i] == 1: for j in range(len(chart[0] ) ): if chart[i][j] == 1: for k in range(len(snake_case__ ) ): _snake_case : List[Any] = 0 temp.append(prime_implicants[i] ) while True: _snake_case : Optional[int] = 0 _snake_case : str = -1 _snake_case : Any = 0 for i in range(len(snake_case__ ) ): _snake_case : Union[str, Any] = chart[i].count(1 ) if count_n > max_n: _snake_case : Dict = count_n _snake_case : Dict = i if max_n == 0: return temp temp.append(prime_implicants[rem] ) for i in range(len(chart[0] ) ): if chart[rem][i] == 1: for j in range(len(snake_case__ ) ): _snake_case : Optional[Any] = 0 def UpperCAmelCase__ (snake_case__ : list[str] , snake_case__ : list[str] ): """simple docstring""" _snake_case : int = [[0 for x in range(len(snake_case__ ) )] for x in range(len(snake_case__ ) )] for i in range(len(snake_case__ ) ): _snake_case : Any = prime_implicants[i].count("""_""" ) for j in range(len(snake_case__ ) ): if is_for_table(prime_implicants[i] , binary[j] , snake_case__ ): _snake_case : Tuple = 1 return chart def UpperCAmelCase__ (): """simple docstring""" _snake_case : int = int(input("""Enter the no. 
of variables\n""" ) ) _snake_case : List[str] = [ float(snake_case__ ) for x in input( """Enter the decimal representation of Minterms 'Spaces Separated'\n""" ).split() ] _snake_case : List[str] = decimal_to_binary(snake_case__ , snake_case__ ) _snake_case : str = check(snake_case__ ) print("""Prime Implicants are:""" ) print(snake_case__ ) _snake_case : int = prime_implicant_chart(snake_case__ , snake_case__ ) _snake_case : str = selection(snake_case__ , snake_case__ ) print("""Essential Prime Implicants are:""" ) print(snake_case__ ) if __name__ == "__main__": import doctest doctest.testmod() main()
64
1
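Tracing two helpers from the row above on tiny inputs. Note that because the minterms pass through float(), str(minterm % 2) yields "1.5"-style fragments rather than single bits; the last assertion reflects that quirk of the code as written.

# Strings differing in exactly one position merge, with "_" at the differing bit.
assert compare_string("0010", "0110") == "0_10"
# More than one differing position: no merge.
assert compare_string("0110", "1001") is False

# Each minterm is expanded over the requested number of variables.
assert decimal_to_binary(3, [1.5]) == ["0.00.01.5"]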
"""simple docstring""" import unittest from parameterized import parameterized from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( GPTNeoXForCausalLM, GPTNeoXForQuestionAnswering, GPTNeoXForSequenceClassification, GPTNeoXForTokenClassification, GPTNeoXModel, ) class lowercase: '''simple docstring''' def __init__( self: Optional[Any], a_: Tuple, a_: Any=13, a_: Any=7, a_: Tuple=True, a_: int=True, a_: Union[str, Any]=True, a_: Optional[int]=True, a_: int=99, a_: Tuple=64, a_: Union[str, Any]=5, a_: Optional[int]=4, a_: int=37, a_: str="gelu", a_: Tuple=0.1, a_: Any=0.1, a_: Tuple=512, a_: Optional[Any]=16, a_: Union[str, Any]=2, a_: int=0.02, a_: List[Any]=3, a_: List[Any]=4, a_: Tuple=None, ): '''simple docstring''' _snake_case : Optional[int] = parent _snake_case : Tuple = batch_size _snake_case : List[Any] = seq_length _snake_case : Union[str, Any] = is_training _snake_case : Union[str, Any] = use_input_mask _snake_case : List[str] = use_token_type_ids _snake_case : Optional[int] = use_labels _snake_case : Optional[Any] = vocab_size _snake_case : str = hidden_size _snake_case : Tuple = num_hidden_layers _snake_case : Optional[Any] = num_attention_heads _snake_case : Optional[Any] = intermediate_size _snake_case : Union[str, Any] = hidden_act _snake_case : Optional[Any] = hidden_dropout_prob _snake_case : Optional[int] = attention_probs_dropout_prob _snake_case : Any = max_position_embeddings _snake_case : Optional[Any] = type_vocab_size _snake_case : Tuple = type_sequence_label_size _snake_case : Optional[int] = initializer_range _snake_case : int = num_labels _snake_case : Dict = num_choices _snake_case : str = scope _snake_case : Optional[int] = vocab_size - 1 def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' _snake_case : Optional[Any] = ids_tensor([self.batch_size, self.seq_length], self.vocab_size ) _snake_case : int = None if self.use_input_mask: _snake_case : List[Any] = random_attention_mask([self.batch_size, self.seq_length] ) _snake_case : Optional[int] = None if self.use_labels: _snake_case : Dict = ids_tensor([self.batch_size, self.seq_length], self.num_labels ) _snake_case : Optional[int] = self.get_config() return config, input_ids, input_mask, token_labels def UpperCamelCase_ ( self: str ): '''simple docstring''' return GPTNeoXConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=a_, initializer_range=self.initializer_range, pad_token_id=self.pad_token_id, ) def UpperCamelCase_ ( self: str ): '''simple docstring''' _snake_case , _snake_case , _snake_case , _snake_case : Dict = self.prepare_config_and_inputs() _snake_case : Optional[Any] = True return config, input_ids, input_mask, token_labels def UpperCamelCase_ ( self: Tuple, a_: int, a_: List[Any], a_: Dict 
): '''simple docstring''' _snake_case : Optional[Any] = GPTNeoXModel(config=a_ ) model.to(a_ ) model.eval() _snake_case : Union[str, Any] = model(a_, attention_mask=a_ ) _snake_case : List[Any] = model(a_ ) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCamelCase_ ( self: Union[str, Any], a_: Union[str, Any], a_: Optional[int], a_: List[Any] ): '''simple docstring''' _snake_case : List[str] = True _snake_case : Any = GPTNeoXModel(a_ ) model.to(a_ ) model.eval() _snake_case : Optional[Any] = model(a_, attention_mask=a_ ) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCamelCase_ ( self: Optional[Any], a_: Optional[int], a_: int, a_: Union[str, Any], a_: Any ): '''simple docstring''' _snake_case : Optional[Any] = GPTNeoXForCausalLM(config=a_ ) model.to(a_ ) model.eval() _snake_case : str = model(a_, attention_mask=a_, labels=a_ ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCamelCase_ ( self: Union[str, Any], a_: int, a_: List[Any], a_: Union[str, Any], a_: str ): '''simple docstring''' _snake_case : int = self.num_labels _snake_case : Optional[int] = GPTNeoXForQuestionAnswering(a_ ) model.to(a_ ) model.eval() _snake_case : Tuple = model(a_, attention_mask=a_ ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) ) def UpperCamelCase_ ( self: Dict, a_: Dict, a_: Optional[int], a_: str, a_: str ): '''simple docstring''' _snake_case : Optional[int] = self.num_labels _snake_case : Union[str, Any] = GPTNeoXForSequenceClassification(a_ ) model.to(a_ ) model.eval() _snake_case : List[str] = ids_tensor([self.batch_size], self.type_sequence_label_size ) _snake_case : List[Any] = model(a_, attention_mask=a_, labels=a_ ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) ) def UpperCamelCase_ ( self: str, a_: Optional[Any], a_: Tuple, a_: List[Any], a_: List[str] ): '''simple docstring''' _snake_case : Optional[Any] = self.num_labels _snake_case : List[str] = GPTNeoXForTokenClassification(a_ ) model.to(a_ ) model.eval() _snake_case : str = model(a_, attention_mask=a_, labels=a_ ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) ) def UpperCamelCase_ ( self: List[str], a_: Dict, a_: str, a_: Optional[Any] ): '''simple docstring''' _snake_case : Any = True _snake_case : Optional[int] = GPTNeoXForCausalLM(config=a_ ) model.to(a_ ) model.eval() # first forward pass _snake_case : Optional[Any] = model(a_, attention_mask=a_, use_cache=a_ ) _snake_case : Union[str, Any] = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids _snake_case : Dict = ids_tensor((self.batch_size, 3), config.vocab_size ) _snake_case : Optional[Any] = ids_tensor((self.batch_size, 3), vocab_size=2 ) # append to next input_ids and _snake_case : Optional[Any] = torch.cat([input_ids, next_tokens], dim=-1 ) _snake_case : List[Any] = torch.cat([input_mask, next_mask], dim=-1 ) _snake_case : int = model(a_, attention_mask=a_, output_hidden_states=a_ ) _snake_case : str = output_from_no_past["""hidden_states"""][0] _snake_case : Union[str, Any] = model( a_, attention_mask=a_, past_key_values=a_, output_hidden_states=a_, )["""hidden_states"""][0] # select random slice _snake_case : Any = ids_tensor((1,), 
output_from_past.shape[-1] ).item() _snake_case : str = output_from_no_past[:, -3:, random_slice_idx].detach() _snake_case : int = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(a_, a_, atol=1E-3 ) ) def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' _snake_case : Any = self.prepare_config_and_inputs() _snake_case , _snake_case , _snake_case , _snake_case : Dict = config_and_inputs _snake_case : Union[str, Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class lowercase( __a , __a , __a , unittest.TestCase ): '''simple docstring''' lowercase__ = ( ( GPTNeoXModel, GPTNeoXForCausalLM, GPTNeoXForQuestionAnswering, GPTNeoXForSequenceClassification, GPTNeoXForTokenClassification, ) if is_torch_available() else () ) lowercase__ = (GPTNeoXForCausalLM,) if is_torch_available() else () lowercase__ = ( { "feature-extraction": GPTNeoXModel, "question-answering": GPTNeoXForQuestionAnswering, "text-classification": GPTNeoXForSequenceClassification, "text-generation": GPTNeoXForCausalLM, "token-classification": GPTNeoXForTokenClassification, "zero-shot": GPTNeoXForSequenceClassification, } if is_torch_available() else {} ) lowercase__ = False lowercase__ = False lowercase__ = False lowercase__ = False def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' _snake_case : Union[str, Any] = GPTNeoXModelTester(self ) _snake_case : List[str] = ConfigTester(self, config_class=a_, hidden_size=64, num_attention_heads=8 ) def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' self.config_tester.run_common_tests() def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' _snake_case , _snake_case , _snake_case , _snake_case : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(a_, a_, a_ ) def UpperCamelCase_ ( self: Any ): '''simple docstring''' _snake_case , _snake_case , _snake_case , _snake_case : List[str] = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(a_, a_, a_ ) def UpperCamelCase_ ( self: Any ): '''simple docstring''' _snake_case , _snake_case , _snake_case , _snake_case : Dict = self.model_tester.prepare_config_and_inputs_for_decoder() _snake_case : str = None self.model_tester.create_and_check_model_as_decoder(a_, a_, a_ ) def UpperCamelCase_ ( self: List[Any] ): '''simple docstring''' _snake_case , _snake_case , _snake_case , _snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(a_, a_, a_ ) def UpperCamelCase_ ( self: int ): '''simple docstring''' _snake_case : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_causal_lm(*a_ ) def UpperCamelCase_ ( self: str ): '''simple docstring''' _snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*a_ ) def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' _snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*a_ ) def UpperCamelCase_ ( self: Any ): '''simple docstring''' _snake_case : Tuple = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_for_token_classification(*a_ ) @unittest.skip(reason="""Feed forward chunking is not implemented""" ) def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' pass @parameterized.expand([("""linear""",), ("""dynamic""",)] ) def UpperCamelCase_ ( self: Optional[int], a_: List[Any] ): '''simple docstring''' _snake_case , _snake_case : Tuple = self.model_tester.prepare_config_and_inputs_for_common() _snake_case : Optional[Any] = ids_tensor([1, 10], config.vocab_size ) _snake_case : Any = ids_tensor([1, int(config.max_position_embeddings * 1.5 )], config.vocab_size ) set_seed(42 ) # Fixed seed at init time so the two models get the same random weights _snake_case : List[str] = GPTNeoXModel(a_ ) original_model.to(a_ ) original_model.eval() _snake_case : Dict = original_model(a_ ).last_hidden_state _snake_case : List[Any] = original_model(a_ ).last_hidden_state set_seed(42 ) # Fixed seed at init time so the two models get the same random weights _snake_case : Optional[int] = {"""type""": scaling_type, """factor""": 10.0} _snake_case : Union[str, Any] = GPTNeoXModel(a_ ) scaled_model.to(a_ ) scaled_model.eval() _snake_case : Optional[int] = scaled_model(a_ ).last_hidden_state _snake_case : Dict = scaled_model(a_ ).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. if scaling_type == "dynamic": self.assertTrue(torch.allclose(a_, a_, atol=1E-5 ) ) else: self.assertFalse(torch.allclose(a_, a_, atol=1E-5 ) ) # The output should be different for long inputs self.assertFalse(torch.allclose(a_, a_, atol=1E-5 ) ) @require_torch class lowercase( unittest.TestCase ): '''simple docstring''' @slow def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' _snake_case : List[str] = AutoTokenizer.from_pretrained("""EleutherAI/pythia-410m-deduped""" ) for checkpointing in [True, False]: _snake_case : Dict = GPTNeoXForCausalLM.from_pretrained("""EleutherAI/pythia-410m-deduped""" ) if checkpointing: model.gradient_checkpointing_enable() else: model.gradient_checkpointing_disable() model.to(a_ ) _snake_case : Optional[int] = tokenizer("""My favorite food is""", return_tensors="""pt""" ).to(a_ ) # The hub repo. is updated on 2023-04-04, resulting in poor outputs. # See: https://github.com/huggingface/transformers/pull/24193 _snake_case : Tuple = """My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI'm not sure""" _snake_case : str = model.generate(**a_, do_sample=a_, max_new_tokens=20 ) _snake_case : List[Any] = tokenizer.batch_decode(a_ )[0] self.assertEqual(a_, a_ )
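# The parameterized test above exercises rope_scaling={"type": ..., "factor": 10.0}.
# A minimal sketch of what the "linear" strategy does, assuming the usual rotary
# embedding construction (function and argument names are illustrative, not the
# library's API):
import torch

def linear_scaled_rope_tables(seq_len: int, dim: int, base: float = 10000.0, factor: float = 10.0):
    # Standard RoPE inverse frequencies over the even dimensions.
    inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float() / dim))
    # Linear scaling: stretch the position axis by dividing indices by `factor`,
    # so a context `factor` times longer maps onto the original position range.
    positions = torch.arange(seq_len).float() / factor
    freqs = torch.outer(positions, inv_freq)
    return freqs.cos(), freqs.sin()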
64
"""simple docstring""" def UpperCAmelCase__ (snake_case__ : Union[str, Any] ): """simple docstring""" stooge(snake_case__ , 0 , len(snake_case__ ) - 1 ) return arr def UpperCAmelCase__ (snake_case__ : List[Any] , snake_case__ : Any , snake_case__ : int ): """simple docstring""" if i >= h: return # If first element is smaller than the last then swap them if arr[i] > arr[h]: _snake_case , _snake_case : Tuple = arr[h], arr[i] # If there are more than 2 elements in the array if h - i + 1 > 2: _snake_case : Dict = (int)((h - i + 1) / 3 ) # Recursively sort first 2/3 elements stooge(snake_case__ , snake_case__ , (h - t) ) # Recursively sort last 2/3 elements stooge(snake_case__ , i + t , (snake_case__) ) # Recursively sort first 2/3 elements stooge(snake_case__ , snake_case__ , (h - t) ) if __name__ == "__main__": A_ = input('''Enter numbers separated by a comma:\n''').strip() A_ = [int(item) for item in user_input.split(''',''')] print(stooge_sort(unsorted))
64
1
"""simple docstring""" import os import unittest from tempfile import TemporaryDirectory import torch import torch.nn as nn from accelerate.utils import ( OffloadedWeightsLoader, extract_submodules_state_dict, load_offloaded_weight, offload_state_dict, offload_weight, ) class lowercase( nn.Module ): '''simple docstring''' def __init__( self: List[str] ): '''simple docstring''' super().__init__() _snake_case : Optional[Any] = nn.Linear(3, 4 ) _snake_case : List[Any] = nn.BatchNormad(4 ) _snake_case : Any = nn.Linear(4, 5 ) def UpperCamelCase_ ( self: str, a_: Optional[Any] ): '''simple docstring''' return self.lineara(self.batchnorm(self.lineara(a_ ) ) ) class lowercase( unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self: int ): '''simple docstring''' _snake_case : Union[str, Any] = ModelForTest() with TemporaryDirectory() as tmp_dir: offload_state_dict(a_, model.state_dict() ) _snake_case : Dict = os.path.join(a_, """index.json""" ) self.assertTrue(os.path.isfile(a_ ) ) # TODO: add tests on what is inside the index for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]: _snake_case : Tuple = os.path.join(a_, f"{key}.dat" ) self.assertTrue(os.path.isfile(a_ ) ) # TODO: add tests on the fact weights are properly loaded def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' _snake_case : List[Any] = [torch.floataa, torch.floataa, torch.bfloataa] for dtype in dtypes: _snake_case : List[Any] = torch.randn(2, 3, dtype=a_ ) with TemporaryDirectory() as tmp_dir: _snake_case : Optional[int] = offload_weight(a_, """weight""", a_, {} ) _snake_case : Optional[int] = os.path.join(a_, """weight.dat""" ) self.assertTrue(os.path.isfile(a_ ) ) self.assertDictEqual(a_, {"""weight""": {"""shape""": [2, 3], """dtype""": str(a_ ).split(""".""" )[1]}} ) _snake_case : int = load_offloaded_weight(a_, index["""weight"""] ) self.assertTrue(torch.equal(a_, a_ ) ) def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' _snake_case : int = ModelForTest() _snake_case : int = model.state_dict() _snake_case : Any = {k: v for k, v in state_dict.items() if """linear2""" not in k} _snake_case : Optional[int] = {k: v for k, v in state_dict.items() if """linear2""" in k} with TemporaryDirectory() as tmp_dir: offload_state_dict(a_, a_ ) _snake_case : Optional[int] = OffloadedWeightsLoader(state_dict=a_, save_folder=a_ ) # Every key is there with the right value self.assertEqual(sorted(a_ ), sorted(state_dict.keys() ) ) for key, param in state_dict.items(): self.assertTrue(torch.allclose(a_, weight_map[key] ) ) _snake_case : Tuple = {k: v for k, v in state_dict.items() if """weight""" in k} _snake_case : List[str] = {k: v for k, v in state_dict.items() if """weight""" not in k} with TemporaryDirectory() as tmp_dir: offload_state_dict(a_, a_ ) _snake_case : Dict = OffloadedWeightsLoader(state_dict=a_, save_folder=a_ ) # Every key is there with the right value self.assertEqual(sorted(a_ ), sorted(state_dict.keys() ) ) for key, param in state_dict.items(): self.assertTrue(torch.allclose(a_, weight_map[key] ) ) with TemporaryDirectory() as tmp_dir: offload_state_dict(a_, a_ ) # Duplicates are removed _snake_case : int = OffloadedWeightsLoader(state_dict=a_, save_folder=a_ ) # Every key is there with the right value self.assertEqual(sorted(a_ ), sorted(state_dict.keys() ) ) for key, param in state_dict.items(): self.assertTrue(torch.allclose(a_, weight_map[key] ) ) def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' _snake_case : int = {"""a.1""": 0, """a.10""": 1, 
"""a.2""": 2} _snake_case : Tuple = extract_submodules_state_dict(a_, ["""a.1""", """a.2"""] ) self.assertDictEqual(a_, {"""a.1""": 0, """a.2""": 2} ) _snake_case : Optional[int] = {"""a.1.a""": 0, """a.10.a""": 1, """a.2.a""": 2} _snake_case : Tuple = extract_submodules_state_dict(a_, ["""a.1""", """a.2"""] ) self.assertDictEqual(a_, {"""a.1.a""": 0, """a.2.a""": 2} )
64
"""simple docstring""" from ..utils import DummyObject, requires_backends class lowercase( metaclass=__a ): '''simple docstring''' lowercase__ = ["note_seq"] def __init__( self: Dict, *a_: Union[str, Any], **a_: List[str] ): '''simple docstring''' requires_backends(self, ["""note_seq"""] ) @classmethod def UpperCamelCase_ ( cls: Optional[int], *a_: Any, **a_: Optional[Any] ): '''simple docstring''' requires_backends(cls, ["""note_seq"""] ) @classmethod def UpperCamelCase_ ( cls: Tuple, *a_: Optional[Any], **a_: List[str] ): '''simple docstring''' requires_backends(cls, ["""note_seq"""] )
64
1
"""simple docstring""" import argparse import json import os import fairseq import torch from torch import nn from transformers import ( SpeechaTextaConfig, SpeechaTextaForCausalLM, SpeechaTextaTokenizer, SpeechEncoderDecoderConfig, SpeechEncoderDecoderModel, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaModel, logging, ) logging.set_verbosity_info() A_ = logging.get_logger(__name__) A_ = { '''post_extract_proj''': '''feature_projection.projection''', '''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''', '''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''', '''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''', '''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''', '''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''', '''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''', '''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''', '''fc2''': '''encoder.layers.*.feed_forward.output_dense''', '''final_layer_norm''': '''encoder.layers.*.final_layer_norm''', '''encoder.layer_norm''': '''encoder.layer_norm''', '''w2v_model.layer_norm''': '''feature_projection.layer_norm''', '''quantizer.weight_proj''': '''quantizer.weight_proj''', '''quantizer.vars''': '''quantizer.codevectors''', '''project_q''': '''project_q''', '''final_proj''': '''project_hid''', '''w2v_encoder.proj''': '''lm_head''', '''mask_emb''': '''masked_spec_embed''', } A_ = [ '''lm_head''', '''quantizer.weight_proj''', '''quantizer.codevectors''', '''project_q''', '''project_hid''', ] def UpperCAmelCase__ (snake_case__ : str , snake_case__ : Dict , snake_case__ : Any , snake_case__ : str , snake_case__ : str ): """simple docstring""" for attribute in key.split(""".""" ): _snake_case : Optional[Any] = getattr(snake_case__ , snake_case__ ) if weight_type is not None: _snake_case : Optional[Any] = getattr(snake_case__ , snake_case__ ).shape else: _snake_case : Optional[Any] = hf_pointer.shape assert hf_shape == value.shape, ( F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be" F" {value.shape} for {full_name}" ) if weight_type == "weight": _snake_case : int = value elif weight_type == "weight_g": _snake_case : str = value elif weight_type == "weight_v": _snake_case : Tuple = value elif weight_type == "bias": _snake_case : List[str] = value else: _snake_case : int = value logger.info(F"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." 
) def UpperCAmelCase__ (snake_case__ : str , snake_case__ : List[str] ): """simple docstring""" _snake_case : List[Any] = [] _snake_case : Optional[Any] = fairseq_model.state_dict() _snake_case : str = hf_model.feature_extractor # if encoder has different dim to decoder -> use proj_weight _snake_case : Optional[Any] = None for name, value in fairseq_dict.items(): _snake_case : Optional[Any] = False if "conv_layers" in name: load_conv_layer( snake_case__ , snake_case__ , snake_case__ , snake_case__ , hf_model.config.feat_extract_norm == """group""" , ) _snake_case : Dict = True elif name.split(""".""" )[0] == "proj": _snake_case : Dict = fairseq_model.proj _snake_case : Optional[int] = True else: for key, mapped_key in MAPPING.items(): if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]: _snake_case : Dict = True if "*" in mapped_key: _snake_case : Optional[int] = name.split(snake_case__ )[0].split(""".""" )[-2] _snake_case : Union[str, Any] = mapped_key.replace("""*""" , snake_case__ ) if "weight_g" in name: _snake_case : str = """weight_g""" elif "weight_v" in name: _snake_case : Optional[Any] = """weight_v""" elif "bias" in name: _snake_case : Union[str, Any] = """bias""" elif "weight" in name: _snake_case : int = """weight""" else: _snake_case : Optional[int] = None set_recursively(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) continue if not is_used: unused_weights.append(snake_case__ ) logger.warning(F"Unused weights: {unused_weights}" ) return proj_weight def UpperCAmelCase__ (snake_case__ : Any , snake_case__ : Dict , snake_case__ : Union[str, Any] , snake_case__ : Union[str, Any] , snake_case__ : int ): """simple docstring""" _snake_case : Any = full_name.split("""conv_layers.""" )[-1] _snake_case : Optional[int] = name.split(""".""" ) _snake_case : List[str] = int(items[0] ) _snake_case : Dict = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F"{full_name} has size {value.shape}, but" F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." ) _snake_case : Tuple = value logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F"{full_name} has size {value.shape}, but" F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." ) _snake_case : List[Any] = value logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was" " found." ) _snake_case : int = value logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F"{full_name} has size {value.shape}, but" F" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found." ) _snake_case : List[str] = value logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." 
) else: unused_weights.append(snake_case__ ) def UpperCAmelCase__ (snake_case__ : Union[str, Any] ): """simple docstring""" _snake_case , _snake_case : Optional[Any] = emb.weight.shape _snake_case : Optional[int] = nn.Linear(snake_case__ , snake_case__ , bias=snake_case__ ) _snake_case : Union[str, Any] = emb.weight.data return lin_layer def UpperCAmelCase__ (snake_case__ : List[Any] ): """simple docstring""" with open(snake_case__ , """r""" , encoding="""utf-8""" ) as f: _snake_case : Any = f.readlines() _snake_case : Optional[Any] = [line.split(""" """ )[0] for line in lines] _snake_case : str = len(snake_case__ ) _snake_case : Tuple = { """<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3, } vocab_dict.update(dict(zip(snake_case__ , range(4 , num_words + 4 ) ) ) ) return vocab_dict @torch.no_grad() def UpperCAmelCase__ (snake_case__ : int , snake_case__ : List[str] , snake_case__ : int , snake_case__ : Dict , snake_case__ : List[Any] , snake_case__ : str , snake_case__ : Union[str, Any] , ): """simple docstring""" _snake_case : Optional[int] = WavaVecaConfig.from_pretrained(snake_case__ ) _snake_case : List[str] = SpeechaTextaConfig.from_pretrained( snake_case__ , vocab_size=snake_case__ , decoder_layers=snake_case__ , do_stable_layer_norm=snake_case__ ) _snake_case : Dict = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=snake_case__ , return_attention_mask=snake_case__ , ) _snake_case , _snake_case , _snake_case : Optional[int] = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} ) _snake_case : Optional[Any] = model[0].eval() # set weights for wav2vec2 encoder _snake_case : Any = WavaVecaModel(snake_case__ ) _snake_case : Optional[Any] = recursively_load_weights_wavaveca(model.encoder , snake_case__ ) _snake_case : Optional[Any] = SpeechaTextaForCausalLM(snake_case__ ) _snake_case , _snake_case : List[str] = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=snake_case__ ) # set output linear layer unexpected_keys.remove("""embed_out""" ) _snake_case : Any = nn.Parameter(model.decoder.embed_out.detach() ) # layer norm is init to identity matrix so leaving it is fine logger.warning(F"The following keys are missing when loading the decoder weights: {missing_keys}" ) logger.warning(F"The following keys are unexpected when loading the decoder weights: {unexpected_keys}" ) _snake_case : Any = SpeechEncoderDecoderModel(encoder=snake_case__ , decoder=snake_case__ ) _snake_case : Any = False # add projection layer _snake_case : int = nn.Parameter(projection_layer.weight ) _snake_case : Any = nn.Parameter(projection_layer.bias ) _snake_case : Any = create_vocab_dict(snake_case__ ) with open(os.path.join(snake_case__ , """vocab.json""" ) , """w""" ) as fp: json.dump(snake_case__ , snake_case__ ) _snake_case : Dict = SpeechaTextaTokenizer(os.path.join(snake_case__ , """vocab.json""" ) ) tokenizer.save_pretrained(snake_case__ ) _snake_case : str = hf_wavavec.config.to_dict() _snake_case : List[str] = tokenizer.pad_token_id _snake_case : Union[str, Any] = tokenizer.bos_token_id _snake_case : Union[str, Any] = tokenizer.eos_token_id _snake_case : Optional[Any] = """speech_to_text_2""" _snake_case : Optional[int] = """wav2vec2""" _snake_case : Tuple = SpeechEncoderDecoderConfig.from_dict(snake_case__ ) hf_wavavec.save_pretrained(snake_case__ ) feature_extractor.save_pretrained(snake_case__ ) if __name__ == "__main__": A_ 
= argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''') parser.add_argument( '''--encoder_config_path''', default='''facebook/wav2vec2-large-lv60''', type=str, help='''Path to hf encoder wav2vec2 checkpoint config''', ) parser.add_argument( '''--decoder_config_path''', default='''facebook/s2t-small-mustc-en-fr-st''', type=str, help='''Path to hf decoder s2t checkpoint config''', ) parser.add_argument('''--vocab_size''', default=1_02_24, type=int, help='''Vocab size of decoder''') parser.add_argument('''--num_decoder_layers''', default=7, type=int, help='''Number of decoder layers''') A_ = parser.parse_args() convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.dict_path, encoder_config_path=args.encoder_config_path, decoder_config_path=args.decoder_config_path, vocab_size=args.vocab_size, num_decoder_layers=args.num_decoder_layers, )
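# Example invocation of the conversion script above (the script file name and all
# paths are placeholders):
# python convert_wav2vec2_speech2text2_checkpoint.py \
#     --checkpoint_path ./checkpoint_best.pt \
#     --dict_path ./dict.txt \
#     --pytorch_dump_folder_path ./converted-model \
#     --vocab_size 10224 \
#     --num_decoder_layers 7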
64
"""simple docstring""" import argparse import hashlib # hashlib is only used inside the Test class import struct class lowercase: '''simple docstring''' def __init__( self: List[Any], a_: List[str] ): '''simple docstring''' _snake_case : int = data _snake_case : Dict = [0X67452301, 0Xefcdab89, 0X98badcfe, 0X10325476, 0Xc3d2e1f0] @staticmethod def UpperCamelCase_ ( a_: Optional[Any], a_: Dict ): '''simple docstring''' return ((n << b) | (n >> (32 - b))) & 0Xffffffff def UpperCamelCase_ ( self: List[Any] ): '''simple docstring''' _snake_case : Union[str, Any] = B"""\x80""" + B"""\x00""" * (63 - (len(self.data ) + 8) % 64) _snake_case : Optional[int] = self.data + padding + struct.pack(""">Q""", 8 * len(self.data ) ) return padded_data def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' return [ self.padded_data[i : i + 64] for i in range(0, len(self.padded_data ), 64 ) ] def UpperCamelCase_ ( self: Optional[Any], a_: List[Any] ): '''simple docstring''' _snake_case : List[str] = list(struct.unpack(""">16L""", a_ ) ) + [0] * 64 for i in range(16, 80 ): _snake_case : List[Any] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1 ) return w def UpperCamelCase_ ( self: int ): '''simple docstring''' _snake_case : Union[str, Any] = self.padding() _snake_case : str = self.split_blocks() for block in self.blocks: _snake_case : Any = self.expand_block(a_ ) _snake_case , _snake_case , _snake_case , _snake_case , _snake_case : Optional[int] = self.h for i in range(0, 80 ): if 0 <= i < 20: _snake_case : int = (b & c) | ((~b) & d) _snake_case : str = 0X5a827999 elif 20 <= i < 40: _snake_case : Optional[int] = b ^ c ^ d _snake_case : str = 0X6ed9eba1 elif 40 <= i < 60: _snake_case : List[Any] = (b & c) | (b & d) | (c & d) _snake_case : List[Any] = 0X8f1bbcdc elif 60 <= i < 80: _snake_case : List[Any] = b ^ c ^ d _snake_case : int = 0Xca62c1d6 _snake_case , _snake_case , _snake_case , _snake_case , _snake_case : Optional[int] = ( self.rotate(a_, 5 ) + f + e + k + expanded_block[i] & 0Xffffffff, a, self.rotate(a_, 30 ), c, d, ) _snake_case : Union[str, Any] = ( self.h[0] + a & 0Xffffffff, self.h[1] + b & 0Xffffffff, self.h[2] + c & 0Xffffffff, self.h[3] + d & 0Xffffffff, self.h[4] + e & 0Xffffffff, ) return ("{:08x}" * 5).format(*self.h ) def UpperCAmelCase__ (): """simple docstring""" _snake_case : Any = B"""Test String""" assert SHAaHash(snake_case__ ).final_hash() == hashlib.shaa(snake_case__ ).hexdigest() # noqa: S324 def UpperCAmelCase__ (): """simple docstring""" _snake_case : List[Any] = argparse.ArgumentParser(description="""Process some strings or files""" ) parser.add_argument( """--string""" , dest="""input_string""" , default="""Hello World!! Welcome to Cryptography""" , help="""Hash the string""" , ) parser.add_argument("""--file""" , dest="""input_file""" , help="""Hash contents of a file""" ) _snake_case : Union[str, Any] = parser.parse_args() _snake_case : List[Any] = args.input_string # In any case hash input should be a bytestring if args.input_file: with open(args.input_file , """rb""" ) as f: _snake_case : str = f.read() else: _snake_case : int = bytes(snake_case__ , """utf-8""" ) print(SHAaHash(snake_case__ ).final_hash() ) if __name__ == "__main__": main() import doctest doctest.testmod()
64
1
"""simple docstring""" import functools from typing import Any def UpperCAmelCase__ (snake_case__ : str , snake_case__ : list[str] ): """simple docstring""" if not isinstance(snake_case__ , snake_case__ ) or len(snake_case__ ) == 0: raise ValueError("""the string should be not empty string""" ) if not isinstance(snake_case__ , snake_case__ ) or not all( isinstance(snake_case__ , snake_case__ ) and len(snake_case__ ) > 0 for item in words ): raise ValueError("""the words should be a list of non-empty strings""" ) # Build trie _snake_case : dict[str, Any] = {} _snake_case : str = """WORD_KEEPER""" for word in words: _snake_case : int = trie for c in word: if c not in trie_node: _snake_case : str = {} _snake_case : int = trie_node[c] _snake_case : str = True _snake_case : Optional[int] = len(snake_case__ ) # Dynamic programming method @functools.cache def is_breakable(snake_case__ : int ) -> bool: if index == len_string: return True _snake_case : Any = trie for i in range(snake_case__ , snake_case__ ): _snake_case : int = trie_node.get(string[i] , snake_case__ ) if trie_node is None: return False if trie_node.get(snake_case__ , snake_case__ ) and is_breakable(i + 1 ): return True return False return is_breakable(0 ) if __name__ == "__main__": import doctest doctest.testmod()
64
"""simple docstring""" import copy from ...configuration_utils import PretrainedConfig from ...utils import add_start_docstrings A_ = r''' [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: title_sep (`str`, *optional*, defaults to `" / "`): Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`]. doc_sep (`str`, *optional*, defaults to `" // "`): Separator inserted between the text of the retrieved document and the original input when calling [`RagRetriever`]. n_docs (`int`, *optional*, defaults to 5): Number of documents to retrieve. max_combined_length (`int`, *optional*, defaults to 300): Max length of contextualized input returned by [`~RagRetriever.__call__`]. retrieval_vector_size (`int`, *optional*, defaults to 768): Dimensionality of the document embeddings indexed by [`RagRetriever`]. retrieval_batch_size (`int`, *optional*, defaults to 8): Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated [`RagRetriever`]. dataset (`str`, *optional*, defaults to `"wiki_dpr"`): A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids using `datasets.list_datasets()`). dataset_split (`str`, *optional*, defaults to `"train"`) Which split of the `dataset` to load. index_name (`str`, *optional*, defaults to `"compressed"`) The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and `"compressed"`. index_path (`str`, *optional*) The path to the serialized faiss index on disk. passages_path (`str`, *optional*): A path to text passages compatible with the faiss index. Required if using [`~models.rag.retrieval_rag.LegacyIndex`] use_dummy_dataset (`bool`, *optional*, defaults to `False`) Whether to load a "dummy" variant of the dataset specified by `dataset`. label_smoothing (`float`, *optional*, defaults to 0.0): Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing in the loss calculation. If set to 0, no label smoothing is performed. do_marginalize (`bool`, *optional*, defaults to `False`): If `True`, the logits are marginalized over all documents by making use of `torch.nn.functional.log_softmax`. reduce_loss (`bool`, *optional*, defaults to `False`): Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation. do_deduplication (`bool`, *optional*, defaults to `True`): Whether or not to deduplicate the generations from different context documents for a given input. Has to be set to `False` if used while training with distributed backend. exclude_bos_score (`bool`, *optional*, defaults to `False`): Whether or not to disregard the BOS token when computing the loss. output_retrieved(`bool`, *optional*, defaults to `False`): If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and `context_attention_mask` are returned. See returned tensors for more detail. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). forced_eos_token_id (`int`, *optional*): The id of the token to force as the last generated token when `max_length` is reached. Usually set to `eos_token_id`. 
''' @add_start_docstrings(__a ) class lowercase( __a ): '''simple docstring''' lowercase__ = "rag" lowercase__ = True def __init__( self: Union[str, Any], a_: int=None, a_: Tuple=True, a_: Optional[int]=None, a_: List[str]=None, a_: int=None, a_: Optional[Any]=None, a_: List[str]=None, a_: Optional[Any]=" / ", a_: Tuple=" // ", a_: List[Any]=5, a_: Dict=300, a_: Tuple=768, a_: Optional[Any]=8, a_: int="wiki_dpr", a_: Any="train", a_: Optional[int]="compressed", a_: Optional[int]=None, a_: List[Any]=None, a_: Optional[Any]=False, a_: str=False, a_: Dict=0.0, a_: Union[str, Any]=True, a_: Union[str, Any]=False, a_: str=False, a_: List[str]=False, a_: Union[str, Any]=True, a_: Any=None, **a_: List[Any], ): '''simple docstring''' super().__init__( bos_token_id=a_, pad_token_id=a_, eos_token_id=a_, decoder_start_token_id=a_, forced_eos_token_id=a_, is_encoder_decoder=a_, prefix=a_, vocab_size=a_, **a_, ) assert ( "question_encoder" in kwargs and "generator" in kwargs ), "Config has to be initialized with question_encoder and generator config" _snake_case : Union[str, Any] = kwargs.pop("""question_encoder""" ) _snake_case : List[str] = question_encoder_config.pop("""model_type""" ) _snake_case : Union[str, Any] = kwargs.pop("""generator""" ) _snake_case : Any = decoder_config.pop("""model_type""" ) from ..auto.configuration_auto import AutoConfig _snake_case : Union[str, Any] = AutoConfig.for_model(a_, **a_ ) _snake_case : Optional[Any] = AutoConfig.for_model(a_, **a_ ) _snake_case : Any = reduce_loss _snake_case : Optional[int] = label_smoothing _snake_case : Dict = exclude_bos_score _snake_case : int = do_marginalize _snake_case : Optional[Any] = title_sep _snake_case : Any = doc_sep _snake_case : List[str] = n_docs _snake_case : Tuple = max_combined_length _snake_case : Optional[Any] = dataset _snake_case : Union[str, Any] = dataset_split _snake_case : Tuple = index_name _snake_case : Any = retrieval_vector_size _snake_case : Union[str, Any] = retrieval_batch_size _snake_case : str = passages_path _snake_case : Tuple = index_path _snake_case : List[Any] = use_dummy_dataset _snake_case : Optional[Any] = output_retrieved _snake_case : Tuple = do_deduplication _snake_case : Union[str, Any] = use_cache if self.forced_eos_token_id is None: _snake_case : Dict = getattr(self.generator, """forced_eos_token_id""", a_ ) @classmethod def UpperCamelCase_ ( cls: Any, a_: PretrainedConfig, a_: PretrainedConfig, **a_: Optional[Any] ): '''simple docstring''' return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **a_ ) def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' _snake_case : Optional[int] = copy.deepcopy(self.__dict__ ) _snake_case : List[str] = self.question_encoder.to_dict() _snake_case : Tuple = self.generator.to_dict() _snake_case : Dict = self.__class__.model_type return output
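# Illustrative construction of the composite config described above (in the
# transformers library the obfuscated classmethod is
# RagConfig.from_question_encoder_generator_configs; the model identifiers are
# examples only):
from transformers import AutoConfig, RagConfig

question_encoder_cfg = AutoConfig.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
generator_cfg = AutoConfig.from_pretrained("facebook/bart-large")
rag_cfg = RagConfig.from_question_encoder_generator_configs(
    question_encoder_cfg, generator_cfg, n_docs=5, retrieval_vector_size=768
)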
64
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available A_ = { '''configuration_instructblip''': [ '''INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''InstructBlipConfig''', '''InstructBlipQFormerConfig''', '''InstructBlipVisionConfig''', ], '''processing_instructblip''': ['''InstructBlipProcessor'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ = [ '''INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST''', '''InstructBlipQFormerModel''', '''InstructBlipPreTrainedModel''', '''InstructBlipForConditionalGeneration''', '''InstructBlipVisionModel''', ] if TYPE_CHECKING: from .configuration_instructblip import ( INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, InstructBlipConfig, InstructBlipQFormerConfig, InstructBlipVisionConfig, ) from .processing_instructblip import InstructBlipProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_instructblip import ( INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST, InstructBlipForConditionalGeneration, InstructBlipPreTrainedModel, InstructBlipQFormerModel, InstructBlipVisionModel, ) else: import sys A_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
64
"""simple docstring""" import os from typing import Dict, List, Tuple, TypeVar, Union A_ = TypeVar('''T''') A_ = Union[List[T], Tuple[T, ...]] A_ = Union[T, List[T], Dict[str, T]] A_ = Union[str, bytes, os.PathLike]
64
1
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging A_ = logging.get_logger(__name__) A_ = { '''microsoft/biogpt''': '''https://huggingface.co/microsoft/biogpt/resolve/main/config.json''', # See all BioGPT models at https://huggingface.co/models?filter=biogpt } class lowercase( __a ): '''simple docstring''' lowercase__ = "biogpt" def __init__( self: Dict, a_: List[Any]=42_384, a_: int=1_024, a_: Optional[int]=24, a_: List[str]=16, a_: Optional[Any]=4_096, a_: int="gelu", a_: int=0.1, a_: List[Any]=0.1, a_: Any=1_024, a_: Optional[Any]=0.02, a_: Dict=1E-12, a_: Tuple=True, a_: Any=True, a_: Tuple=0.0, a_: str=0.0, a_: int=1, a_: Any=0, a_: List[str]=2, **a_: Optional[int], ): '''simple docstring''' _snake_case : Dict = vocab_size _snake_case : int = max_position_embeddings _snake_case : Optional[int] = hidden_size _snake_case : Any = num_hidden_layers _snake_case : List[str] = num_attention_heads _snake_case : Optional[int] = intermediate_size _snake_case : Dict = hidden_act _snake_case : Tuple = hidden_dropout_prob _snake_case : Tuple = attention_probs_dropout_prob _snake_case : Optional[Any] = initializer_range _snake_case : List[str] = layer_norm_eps _snake_case : List[str] = scale_embedding _snake_case : Dict = use_cache _snake_case : Optional[int] = layerdrop _snake_case : Any = activation_dropout super().__init__(pad_token_id=a_, bos_token_id=a_, eos_token_id=a_, **a_ )
64
"""simple docstring""" def UpperCAmelCase__ (snake_case__ : list ): """simple docstring""" if len(snake_case__ ) <= 1: return [tuple(snake_case__ )] _snake_case : List[Any] = [] def generate(snake_case__ : int , snake_case__ : list ): if k == 1: res.append(tuple(arr[:] ) ) return generate(k - 1 , snake_case__ ) for i in range(k - 1 ): if k % 2 == 0: # k is even _snake_case , _snake_case : Optional[Any] = arr[k - 1], arr[i] else: # k is odd _snake_case , _snake_case : List[str] = arr[k - 1], arr[0] generate(k - 1 , snake_case__ ) generate(len(snake_case__ ) , snake_case__ ) return res if __name__ == "__main__": A_ = input('''Enter numbers separated by a comma:\n''').strip() A_ = [int(item) for item in user_input.split(''',''')] print(heaps(arr))
64
1
"""simple docstring""" from __future__ import annotations from functools import lru_cache from math import ceil A_ = 1_00 A_ = set(range(3, NUM_PRIMES, 2)) primes.add(2) A_ = 42 for prime in range(3, ceil(NUM_PRIMES**0.5), 2): if prime not in primes: continue primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime))) @lru_cache(maxsize=1_00 ) def UpperCAmelCase__ (snake_case__ : int ): """simple docstring""" if number_to_partition < 0: return set() elif number_to_partition == 0: return {1} _snake_case : set[int] = set() _snake_case : int _snake_case : int for prime in primes: if prime > number_to_partition: continue for sub in partition(number_to_partition - prime ): ret.add(sub * prime ) return ret def UpperCAmelCase__ (snake_case__ : int = 50_00 ): """simple docstring""" for number_to_partition in range(1 , snake_case__ ): if len(partition(snake_case__ ) ) > number_unique_partitions: return number_to_partition return None if __name__ == "__main__": print(F'''{solution() = }''')
64
"""simple docstring""" from math import factorial A_ = {str(d): factorial(d) for d in range(10)} def UpperCAmelCase__ (snake_case__ : int ): """simple docstring""" return sum(DIGIT_FACTORIAL[d] for d in str(snake_case__ ) ) def UpperCAmelCase__ (): """simple docstring""" _snake_case : List[str] = 7 * factorial(9 ) + 1 return sum(i for i in range(3 , snake_case__ ) if sum_of_digit_factorial(snake_case__ ) == i ) if __name__ == "__main__": print(F'''{solution() = }''')
64
1
"""simple docstring""" import argparse import dataclasses import json import logging import os import shutil from typing import List, Optional import datasets from accelerate import Accelerator from datasets import load_dataset from finetuning import finetune from tqdm.auto import tqdm import transformers from transformers import AutoConfig, set_seed from transformers.trainer_utils import IntervalStrategy A_ = logging.getLogger(__name__) A_ = '''pytorch_model.bin''' @dataclasses.dataclass class lowercase: '''simple docstring''' lowercase__ = dataclasses.field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models."} ) lowercase__ = dataclasses.field( default=__a , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co."} , ) @dataclasses.dataclass class lowercase: '''simple docstring''' lowercase__ = dataclasses.field(metadata={"help": "A csv or a json file containing the training data."} ) lowercase__ = dataclasses.field(metadata={"help": "A csv or a json file containing the data to predict on."} ) lowercase__ = dataclasses.field( default=__a , metadata={"help": "A csv or a json file containing the validation data."} ) lowercase__ = dataclasses.field( default=__a , metadata={"help": "The name of the task to train on."} , ) lowercase__ = dataclasses.field( default=__a , metadata={"help": "The list of labels for the task."} ) @dataclasses.dataclass class lowercase: '''simple docstring''' lowercase__ = dataclasses.field( metadata={"help": "The output directory where the model predictions and checkpoints will be written."} ) lowercase__ = dataclasses.field( default="accuracy" , metadata={"help": "The evaluation metric used for the task."} ) lowercase__ = dataclasses.field( default="no" , metadata={ "help": "The evaluation strategy to adopt during training. Possible values are: [\"no\", \"step\", \"epoch]" } , ) lowercase__ = dataclasses.field( default=10 , metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."} , ) lowercase__ = dataclasses.field( default=0.0 , metadata={ "help": "How much the specified evaluation metric must improve to satisfy early stopping conditions." 
} , ) lowercase__ = dataclasses.field( default=__a , metadata={"help": "Whether to filter the pseudo-labeled data based on the confidence score."} , ) lowercase__ = dataclasses.field( default=__a , metadata={"help": "Whether to filter the pseudo-labeled data based on the validation performance."} , ) lowercase__ = dataclasses.field( default=__a , metadata={"help": "Whether to fine-tune on labeled data after pseudo training."} , ) lowercase__ = dataclasses.field( default=0.0 , metadata={"help": "Confidence threshold for pseudo-labeled data filtering."} , ) lowercase__ = dataclasses.field( default=1_00 , metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."} , ) lowercase__ = dataclasses.field( default=__a , metadata={"help": "Random seed for initialization."} , ) def UpperCAmelCase__ (snake_case__ : Tuple , snake_case__ : Union[str, Any] , snake_case__ : str , snake_case__ : Dict , snake_case__ : List[str] , snake_case__ : Optional[int] ): """simple docstring""" _snake_case : int = datasets.concatenate_datasets([infer_input, infer_output] , axis=1 ) if args.do_filter_by_confidence: _snake_case : str = dataset.filter(lambda snake_case__ : example["probability"] > args.confidence_threshold ) if args.do_filter_by_val_performance: assert eval_result >= 0.0 and eval_result <= 1.0 _snake_case : Optional[Any] = int(eval_result * len(snake_case__ ) ) print(snake_case__ ) _snake_case : Union[str, Any] = dataset.sort("""probability""" , reverse=snake_case__ ) _snake_case : int = dataset.select(range(snake_case__ ) ) _snake_case : Dict = dataset.remove_columns(["""label""", """probability"""] ) _snake_case : int = dataset.rename_column("""prediction""" , """label""" ) _snake_case : Dict = dataset.map(lambda snake_case__ : {"label": idalabel[example["label"]]} ) _snake_case : Optional[int] = dataset.shuffle(seed=args.seed ) _snake_case : List[Any] = os.path.join(snake_case__ , F"train_pseudo.{args.data_file_extension}" ) if args.data_file_extension == "csv": dataset.to_csv(snake_case__ , index=snake_case__ ) else: dataset.to_json(snake_case__ ) def UpperCAmelCase__ (snake_case__ : List[Any] , snake_case__ : Dict , snake_case__ : int , snake_case__ : List[Any] , **snake_case__ : int ): """simple docstring""" _snake_case : Tuple = Accelerator() # Make one log on every process with the configuration for debugging. logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO , ) logger.info(accelerator.state ) # Setup logging, we only want one process per machine to log things on the # screen. accelerator.is_local_main_process is only True for one process per # machine. 
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR ) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() _snake_case : List[str] = STModelArguments(model_name_or_path=snake_case__ ) _snake_case : Union[str, Any] = STDataArguments(train_file=snake_case__ , infer_file=snake_case__ ) _snake_case : List[Any] = STTrainingArguments(output_dir=snake_case__ ) _snake_case : int = argparse.Namespace() for arg_class in (model_args, data_args, training_args): for key, value in vars(snake_case__ ).items(): setattr(snake_case__ , snake_case__ , snake_case__ ) for key, value in kwargs.items(): if hasattr(snake_case__ , snake_case__ ): setattr(snake_case__ , snake_case__ , snake_case__ ) # Sanity checks _snake_case : str = {} _snake_case : int = None # You need to provide the training data and the data to predict on assert args.train_file is not None assert args.infer_file is not None _snake_case : Any = args.train_file _snake_case : List[str] = args.infer_file if args.evaluation_strategy != IntervalStrategy.NO.value: assert args.eval_file is not None _snake_case : Tuple = args.eval_file for key in data_files: _snake_case : Tuple = data_files[key].split(""".""" )[-1] assert extension in ["csv", "json"], F"`{key}_file` should be a csv or a json file." if args.data_file_extension is None: _snake_case : Tuple = extension else: assert extension == args.data_file_extension, F"`{key}_file` should be a {args.data_file_extension} file`." assert ( args.eval_metric in datasets.list_metrics() ), F"{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}." # If passed along, set the training seed now. 
if args.seed is not None: set_seed(args.seed ) logger.info("""Creating the initial data directory for self-training...""" ) _snake_case : Any = F"{args.output_dir}/self-train_iter-{{}}".format _snake_case : Optional[int] = data_dir_format(0 ) if accelerator.is_main_process: if args.output_dir is not None: os.makedirs(args.output_dir , exist_ok=snake_case__ ) os.makedirs(snake_case__ , exist_ok=snake_case__ ) accelerator.wait_for_everyone() _snake_case : str = None _snake_case : Union[str, Any] = None _snake_case : Optional[int] = 0 _snake_case : Optional[Any] = False # Show the progress bar _snake_case : Optional[Any] = tqdm(range(args.max_selftrain_iterations ) , disable=not accelerator.is_local_main_process ) # Self-train for iteration in range(0 , int(args.max_selftrain_iterations ) ): _snake_case : List[str] = data_dir_format(snake_case__ ) assert os.path.exists(snake_case__ ) # Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for # iteration > 0 _snake_case : Optional[int] = os.path.join(snake_case__ , """stage-1""" ) _snake_case : Tuple = { """accelerator""": accelerator, """model_name_or_path""": args.model_name_or_path, """cache_dir""": args.cache_dir, """do_train""": True, """train_file""": data_files["""train"""] if iteration == 0 else data_files["""train_pseudo"""], """do_eval""": True if args.eval_file is not None else False, """eval_file""": data_files["""eval"""], """do_predict""": True, """infer_file""": data_files["""infer"""], """task_name""": args.task_name, """label_list""": args.label_list, """output_dir""": current_output_dir, """eval_metric""": args.eval_metric, """evaluation_strategy""": args.evaluation_strategy, """early_stopping_patience""": args.early_stopping_patience, """early_stopping_threshold""": args.early_stopping_threshold, """seed""": args.seed, } # Add additional training arguments for key, value in kwargs.items(): if key not in arguments_dict and not hasattr(snake_case__ , snake_case__ ): arguments_dict.update({key: value} ) _snake_case : List[str] = os.path.join(snake_case__ , """best-checkpoint""" , snake_case__ ) if os.path.exists(snake_case__ ): logger.info( """Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.""" , snake_case__ , snake_case__ , ) else: logger.info("""***** Running self-training: iteration: %d, stage: 1 *****""" , snake_case__ ) finetune(**snake_case__ ) accelerator.wait_for_everyone() assert os.path.exists(snake_case__ ) logger.info("""Self-training job completed: iteration: %d, stage: 1.""" , snake_case__ ) if iteration > 0 and args.finetune_on_labeled_data: # Stage 2 (optional): fine-tuning on the original labeled data _snake_case : Any = os.path.join(snake_case__ , """best-checkpoint""" ) _snake_case : List[str] = os.path.join(snake_case__ , """stage-2""" ) # Update arguments_dict _snake_case : Union[str, Any] = model_path _snake_case : Union[str, Any] = data_files["""train"""] _snake_case : Union[str, Any] = current_output_dir _snake_case : Dict = os.path.join(snake_case__ , """best-checkpoint""" , snake_case__ ) if os.path.exists(snake_case__ ): logger.info( """Found existing model checkpoint at %s. 
Skipping self-training: iteration: %d, stage: 2.""" , snake_case__ , snake_case__ , ) else: logger.info("""***** Running self-training: iteration: %d, stage: 2 *****""" , snake_case__ ) finetune(**snake_case__ ) accelerator.wait_for_everyone() assert os.path.exists(snake_case__ ) logger.info("""Self-training job completed: iteration: %d, stage: 2.""" , snake_case__ ) _snake_case : Any = iteration _snake_case : Any = data_dir_format(iteration + 1 ) _snake_case : Dict = AutoConfig.from_pretrained(os.path.join(snake_case__ , """best-checkpoint""" ) ) _snake_case : List[Any] = config.idalabel _snake_case : Optional[Any] = os.path.join(snake_case__ , """eval_results_best-checkpoint.json""" ) _snake_case : int = os.path.join(snake_case__ , """test_results_best-checkpoint.json""" ) assert os.path.exists(snake_case__ ) with open(snake_case__ , """r""" ) as f: _snake_case : Any = float(json.load(snake_case__ )[args.eval_metric] ) _snake_case : List[str] = os.path.join(snake_case__ , """infer_output_best-checkpoint.csv""" ) assert os.path.exists(snake_case__ ) # Loading the dataset from local csv or json files. _snake_case : List[str] = load_dataset(args.data_file_extension , data_files={"""data""": data_files["""infer"""]} )["""data"""] _snake_case : Optional[Any] = load_dataset("""csv""" , data_files={"""data""": infer_output_file} )["""data"""] if accelerator.is_main_process: os.makedirs(snake_case__ , exist_ok=snake_case__ ) shutil.copy(snake_case__ , os.path.join(snake_case__ , F"eval_results_iter-{iteration}.json" ) ) if os.path.exists(snake_case__ ): shutil.copy(snake_case__ , os.path.join(snake_case__ , F"test_results_iter-{iteration}.json" ) ) create_pseudo_labeled_data(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) accelerator.wait_for_everyone() _snake_case : Any = os.path.join(snake_case__ , F"train_pseudo.{args.data_file_extension}" ) if args.evaluation_strategy != IntervalStrategy.NO.value: _snake_case : Union[str, Any] = eval_result if best_iteration is None: _snake_case : List[Any] = new_iteration _snake_case : List[Any] = new_eval_result else: if new_eval_result - best_eval_result > args.early_stopping_threshold: _snake_case : Dict = new_iteration _snake_case : List[str] = new_eval_result _snake_case : Dict = 0 else: if new_eval_result == best_eval_result: _snake_case : Union[str, Any] = new_iteration _snake_case : int = new_eval_result early_stopping_patience_counter += 1 if early_stopping_patience_counter >= args.early_stopping_patience: _snake_case : str = True progress_bar.update(1 ) if should_training_stop: break if best_iteration is not None: # Save the best iteration logger.info("""Best iteration: %d""" , snake_case__ ) logger.info("""Best evaluation result: %s = %f""" , args.eval_metric , snake_case__ ) accelerator.wait_for_everyone() if accelerator.is_main_process: shutil.copy( os.path.join(snake_case__ , F"eval_results_iter-{iteration}.json" ) , os.path.join(snake_case__ , """eval_results_best-iteration.json""" ) , ) else: # Assume that the last iteration is the best logger.info("""Best iteration: %d""" , args.max_selftrain_iterations - 1 ) logger.info("""Best evaluation result: %s = %f""" , args.eval_metric , snake_case__ ) accelerator.wait_for_everyone() if accelerator.is_main_process: shutil.copy( os.path.join(snake_case__ , F"eval_results_iter-{args.max_selftrain_iterations - 1}.json" ) , os.path.join(snake_case__ , """eval_results_best-iteration.json""" ) , )
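# Illustrative call of the self-training entry point above (in the original
# research example the function is named `selftrain`; the file names here are
# placeholders):
# selftrain(
#     model_name_or_path="bert-base-uncased",
#     train_file="train.csv",
#     infer_file="unlabeled.csv",
#     output_dir="./self-train-output",
#     max_selftrain_iterations=3,
# )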
64
"""simple docstring""" from __future__ import annotations def UpperCAmelCase__ (snake_case__ : list[int] , snake_case__ : int ): """simple docstring""" if len(snake_case__ ) < k or k < 0: raise ValueError("""Invalid Input""" ) _snake_case : Optional[int] = sum(array[:k] ) for i in range(len(snake_case__ ) - k ): _snake_case : Optional[Any] = current_sum - array[i] + array[i + k] _snake_case : List[str] = max(snake_case__ , snake_case__ ) return max_sum if __name__ == "__main__": from doctest import testmod from random import randint testmod() A_ = [randint(-10_00, 10_00) for i in range(1_00)] A_ = randint(0, 1_10) print(F'''The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}''')
64
1
"""simple docstring""" import unittest from transformers import ( MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, Pipeline, ZeroShotClassificationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow from .test_pipelines_common import ANY # These 2 model types require different inputs than those of the usual text models. A_ = {'''LayoutLMv2Config''', '''LayoutLMv3Config'''} @is_pipeline_test class lowercase( unittest.TestCase ): '''simple docstring''' lowercase__ = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING lowercase__ = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if model_mapping is not None: lowercase__ = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP} if tf_model_mapping is not None: lowercase__ = { config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP } def UpperCamelCase_ ( self: Any, a_: List[str], a_: Optional[int], a_: Tuple ): '''simple docstring''' _snake_case : Tuple = ZeroShotClassificationPipeline( model=a_, tokenizer=a_, candidate_labels=["""polics""", """health"""] ) return classifier, ["Who are you voting for in 2020?", "My stomach hurts."] def UpperCamelCase_ ( self: Tuple, a_: Optional[int], a_: Union[str, Any] ): '''simple docstring''' _snake_case : int = classifier("""Who are you voting for in 2020?""", candidate_labels="""politics""" ) self.assertEqual(a_, {"""sequence""": ANY(a_ ), """labels""": [ANY(a_ )], """scores""": [ANY(a_ )]} ) # No kwarg _snake_case : str = classifier("""Who are you voting for in 2020?""", ["""politics"""] ) self.assertEqual(a_, {"""sequence""": ANY(a_ ), """labels""": [ANY(a_ )], """scores""": [ANY(a_ )]} ) _snake_case : Dict = classifier("""Who are you voting for in 2020?""", candidate_labels=["""politics"""] ) self.assertEqual(a_, {"""sequence""": ANY(a_ ), """labels""": [ANY(a_ )], """scores""": [ANY(a_ )]} ) _snake_case : Optional[int] = classifier("""Who are you voting for in 2020?""", candidate_labels="""politics, public health""" ) self.assertEqual( a_, {"""sequence""": ANY(a_ ), """labels""": [ANY(a_ ), ANY(a_ )], """scores""": [ANY(a_ ), ANY(a_ )]} ) self.assertAlmostEqual(sum(nested_simplify(outputs["""scores"""] ) ), 1.0 ) _snake_case : int = classifier("""Who are you voting for in 2020?""", candidate_labels=["""politics""", """public health"""] ) self.assertEqual( a_, {"""sequence""": ANY(a_ ), """labels""": [ANY(a_ ), ANY(a_ )], """scores""": [ANY(a_ ), ANY(a_ )]} ) self.assertAlmostEqual(sum(nested_simplify(outputs["""scores"""] ) ), 1.0 ) _snake_case : List[str] = classifier( """Who are you voting for in 2020?""", candidate_labels="""politics""", hypothesis_template="""This text is about {}""" ) self.assertEqual(a_, {"""sequence""": ANY(a_ ), """labels""": [ANY(a_ )], """scores""": [ANY(a_ )]} ) # https://github.com/huggingface/transformers/issues/13846 _snake_case : Any = classifier(["""I am happy"""], ["""positive""", """negative"""] ) self.assertEqual( a_, [ {"""sequence""": ANY(a_ ), """labels""": [ANY(a_ ), ANY(a_ )], """scores""": [ANY(a_ ), ANY(a_ )]} for i in range(1 ) ], ) _snake_case : List[str] = classifier(["""I am happy""", """I am sad"""], ["""positive""", """negative"""] ) self.assertEqual( a_, [ {"""sequence""": ANY(a_ ), """labels""": [ANY(a_ ), ANY(a_ )], """scores""": [ANY(a_ ), ANY(a_ )]} for i in range(2 ) ], ) with self.assertRaises(a_ ): classifier("""""", candidate_labels="""politics""" ) with 
self.assertRaises(a_ ): classifier(a_, candidate_labels="""politics""" ) with self.assertRaises(a_ ): classifier("""Who are you voting for in 2020?""", candidate_labels="""""" ) with self.assertRaises(a_ ): classifier("""Who are you voting for in 2020?""", candidate_labels=a_ ) with self.assertRaises(a_ ): classifier( """Who are you voting for in 2020?""", candidate_labels="""politics""", hypothesis_template="""Not formatting template""", ) with self.assertRaises(a_ ): classifier( """Who are you voting for in 2020?""", candidate_labels="""politics""", hypothesis_template=a_, ) self.run_entailment_id(a_ ) def UpperCamelCase_ ( self: Optional[int], a_: Pipeline ): '''simple docstring''' _snake_case : Any = zero_shot_classifier.model.config _snake_case : int = config.labelaid _snake_case : Optional[Any] = zero_shot_classifier.entailment_id _snake_case : int = {"""LABEL_0""": 0, """LABEL_1""": 1, """LABEL_2""": 2} self.assertEqual(zero_shot_classifier.entailment_id, -1 ) _snake_case : List[str] = {"""entailment""": 0, """neutral""": 1, """contradiction""": 2} self.assertEqual(zero_shot_classifier.entailment_id, 0 ) _snake_case : Tuple = {"""ENTAIL""": 0, """NON-ENTAIL""": 1} self.assertEqual(zero_shot_classifier.entailment_id, 0 ) _snake_case : str = {"""ENTAIL""": 2, """NEUTRAL""": 1, """CONTR""": 0} self.assertEqual(zero_shot_classifier.entailment_id, 2 ) _snake_case : List[str] = original_labelaid self.assertEqual(a_, zero_shot_classifier.entailment_id ) @require_torch def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' _snake_case : Union[str, Any] = pipeline( """zero-shot-classification""", model="""sshleifer/tiny-distilbert-base-cased-distilled-squad""", framework="""pt""", ) # There was a regression in 4.10 for this # Adding a test so we don't make the mistake again. 
# https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499 zero_shot_classifier( """Who are you voting for in 2020?""" * 100, candidate_labels=["""politics""", """public health""", """science"""] ) @require_torch def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' _snake_case : Tuple = pipeline( """zero-shot-classification""", model="""sshleifer/tiny-distilbert-base-cased-distilled-squad""", framework="""pt""", ) _snake_case : Dict = zero_shot_classifier( """Who are you voting for in 2020?""", candidate_labels=["""politics""", """public health""", """science"""] ) self.assertEqual( nested_simplify(a_ ), { """sequence""": """Who are you voting for in 2020?""", """labels""": ["""science""", """public health""", """politics"""], """scores""": [0.333, 0.333, 0.333], }, ) @require_tf def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' _snake_case : Tuple = pipeline( """zero-shot-classification""", model="""sshleifer/tiny-distilbert-base-cased-distilled-squad""", framework="""tf""", ) _snake_case : int = zero_shot_classifier( """Who are you voting for in 2020?""", candidate_labels=["""politics""", """public health""", """science"""] ) self.assertEqual( nested_simplify(a_ ), { """sequence""": """Who are you voting for in 2020?""", """labels""": ["""science""", """public health""", """politics"""], """scores""": [0.333, 0.333, 0.333], }, ) @slow @require_torch def UpperCamelCase_ ( self: int ): '''simple docstring''' _snake_case : Optional[Any] = pipeline("""zero-shot-classification""", model="""roberta-large-mnli""", framework="""pt""" ) _snake_case : Optional[Any] = zero_shot_classifier( """Who are you voting for in 2020?""", candidate_labels=["""politics""", """public health""", """science"""] ) self.assertEqual( nested_simplify(a_ ), { """sequence""": """Who are you voting for in 2020?""", """labels""": ["""politics""", """public health""", """science"""], """scores""": [0.976, 0.015, 0.009], }, ) _snake_case : List[str] = zero_shot_classifier( """The dominant sequence transduction models are based on complex recurrent or convolutional neural networks""" """ in an encoder-decoder configuration. The best performing models also connect the encoder and decoder""" """ through an attention mechanism. We propose a new simple network architecture, the Transformer, based""" """ solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two""" """ machine translation tasks show these models to be superior in quality while being more parallelizable""" """ and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014""" """ English-to-German translation task, improving over the existing best results, including ensembles by""" """ over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new""" """ single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small""" """ fraction of the training costs of the best models from the literature. 
We show that the Transformer""" """ generalizes well to other tasks by applying it successfully to English constituency parsing both with""" """ large and limited training data.""", candidate_labels=["""machine learning""", """statistics""", """translation""", """vision"""], multi_label=a_, ) self.assertEqual( nested_simplify(a_ ), { """sequence""": ( """The dominant sequence transduction models are based on complex recurrent or convolutional neural""" """ networks in an encoder-decoder configuration. The best performing models also connect the""" """ encoder and decoder through an attention mechanism. We propose a new simple network""" """ architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence""" """ and convolutions entirely. Experiments on two machine translation tasks show these models to be""" """ superior in quality while being more parallelizable and requiring significantly less time to""" """ train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,""" """ improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014""" """ English-to-French translation task, our model establishes a new single-model state-of-the-art""" """ BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training""" """ costs of the best models from the literature. We show that the Transformer generalizes well to""" """ other tasks by applying it successfully to English constituency parsing both with large and""" """ limited training data.""" ), """labels""": ["""translation""", """machine learning""", """vision""", """statistics"""], """scores""": [0.817, 0.713, 0.018, 0.018], }, ) @slow @require_tf def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' _snake_case : List[Any] = pipeline("""zero-shot-classification""", model="""roberta-large-mnli""", framework="""tf""" ) _snake_case : Optional[int] = zero_shot_classifier( """Who are you voting for in 2020?""", candidate_labels=["""politics""", """public health""", """science"""] ) self.assertEqual( nested_simplify(a_ ), { """sequence""": """Who are you voting for in 2020?""", """labels""": ["""politics""", """public health""", """science"""], """scores""": [0.976, 0.015, 0.009], }, ) _snake_case : Dict = zero_shot_classifier( """The dominant sequence transduction models are based on complex recurrent or convolutional neural networks""" """ in an encoder-decoder configuration. The best performing models also connect the encoder and decoder""" """ through an attention mechanism. We propose a new simple network architecture, the Transformer, based""" """ solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two""" """ machine translation tasks show these models to be superior in quality while being more parallelizable""" """ and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014""" """ English-to-German translation task, improving over the existing best results, including ensembles by""" """ over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new""" """ single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small""" """ fraction of the training costs of the best models from the literature. 
We show that the Transformer""" """ generalizes well to other tasks by applying it successfully to English constituency parsing both with""" """ large and limited training data.""", candidate_labels=["""machine learning""", """statistics""", """translation""", """vision"""], multi_label=a_, ) self.assertEqual( nested_simplify(a_ ), { """sequence""": ( """The dominant sequence transduction models are based on complex recurrent or convolutional neural""" """ networks in an encoder-decoder configuration. The best performing models also connect the""" """ encoder and decoder through an attention mechanism. We propose a new simple network""" """ architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence""" """ and convolutions entirely. Experiments on two machine translation tasks show these models to be""" """ superior in quality while being more parallelizable and requiring significantly less time to""" """ train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,""" """ improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014""" """ English-to-French translation task, our model establishes a new single-model state-of-the-art""" """ BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training""" """ costs of the best models from the literature. We show that the Transformer generalizes well to""" """ other tasks by applying it successfully to English constituency parsing both with large and""" """ limited training data.""" ), """labels""": ["""translation""", """machine learning""", """vision""", """statistics"""], """scores""": [0.817, 0.713, 0.018, 0.018], }, )
"""simple docstring""" import json from typing import List, Optional, Tuple from tokenizers import normalizers from ....tokenization_utils_fast import PreTrainedTokenizerFast from ....utils import logging from .tokenization_retribert import RetriBertTokenizer A_ = logging.get_logger(__name__) A_ = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''} A_ = { '''vocab_file''': { '''yjernite/retribert-base-uncased''': ( '''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''yjernite/retribert-base-uncased''': ( '''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json''' ), }, } A_ = { '''yjernite/retribert-base-uncased''': 5_12, } A_ = { '''yjernite/retribert-base-uncased''': {'''do_lower_case''': True}, } class lowercase( __a ): '''simple docstring''' lowercase__ = VOCAB_FILES_NAMES lowercase__ = PRETRAINED_VOCAB_FILES_MAP lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase__ = PRETRAINED_INIT_CONFIGURATION lowercase__ = RetriBertTokenizer lowercase__ = ["input_ids", "attention_mask"] def __init__( self: int, a_: int=None, a_: Dict=None, a_: Any=True, a_: int="[UNK]", a_: Any="[SEP]", a_: List[Any]="[PAD]", a_: List[Any]="[CLS]", a_: str="[MASK]", a_: Dict=True, a_: Optional[int]=None, **a_: Tuple, ): '''simple docstring''' super().__init__( a_, tokenizer_file=a_, do_lower_case=a_, unk_token=a_, sep_token=a_, pad_token=a_, cls_token=a_, mask_token=a_, tokenize_chinese_chars=a_, strip_accents=a_, **a_, ) _snake_case : List[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get("""lowercase""", a_ ) != do_lower_case or normalizer_state.get("""strip_accents""", a_ ) != strip_accents or normalizer_state.get("""handle_chinese_chars""", a_ ) != tokenize_chinese_chars ): _snake_case : Dict = getattr(a_, normalizer_state.pop("""type""" ) ) _snake_case : List[Any] = do_lower_case _snake_case : List[str] = strip_accents _snake_case : Tuple = tokenize_chinese_chars _snake_case : Tuple = normalizer_class(**a_ ) _snake_case : List[str] = do_lower_case def UpperCamelCase_ ( self: Any, a_: str, a_: Optional[int]=None ): '''simple docstring''' _snake_case : Optional[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def UpperCamelCase_ ( self: List[str], a_: List[int], a_: Optional[List[int]] = None ): '''simple docstring''' _snake_case : Union[str, Any] = [self.sep_token_id] _snake_case : List[str] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def UpperCamelCase_ ( self: Dict, a_: str, a_: Optional[str] = None ): '''simple docstring''' _snake_case : Union[str, Any] = self._tokenizer.model.save(a_, name=a_ ) return tuple(a_ )
"""simple docstring""" import unittest import numpy as np from diffusers import OnnxStableDiffusionInpaintPipelineLegacy from diffusers.utils.testing_utils import ( is_onnx_available, load_image, load_numpy, nightly, require_onnxruntime, require_torch_gpu, ) if is_onnx_available(): import onnxruntime as ort @nightly @require_onnxruntime @require_torch_gpu class lowercase( unittest.TestCase ): '''simple docstring''' @property def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def UpperCamelCase_ ( self: int ): '''simple docstring''' _snake_case : Any = ort.SessionOptions() _snake_case : Union[str, Any] = False return options def UpperCamelCase_ ( self: List[Any] ): '''simple docstring''' _snake_case : Any = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/in_paint/overture-creations-5sI6fQgYIuo.png""" ) _snake_case : Union[str, Any] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/in_paint/overture-creations-5sI6fQgYIuo_mask.png""" ) _snake_case : Union[str, Any] = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy""" ) # using the PNDM scheduler by default _snake_case : Optional[Any] = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained( """CompVis/stable-diffusion-v1-4""", revision="""onnx""", safety_checker=a_, feature_extractor=a_, provider=self.gpu_provider, sess_options=self.gpu_options, ) pipe.set_progress_bar_config(disable=a_ ) _snake_case : Optional[Any] = """A red cat sitting on a park bench""" _snake_case : Optional[int] = np.random.RandomState(0 ) _snake_case : Any = pipe( prompt=a_, image=a_, mask_image=a_, strength=0.75, guidance_scale=7.5, num_inference_steps=15, generator=a_, output_type="""np""", ) _snake_case : Dict = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image ).max() < 1E-2
"""simple docstring""" import json import os import re import unittest from transformers import CodeGenTokenizer, CodeGenTokenizerFast from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class lowercase( __a , unittest.TestCase ): '''simple docstring''' lowercase__ = CodeGenTokenizer lowercase__ = CodeGenTokenizerFast lowercase__ = True lowercase__ = {"add_prefix_space": True} lowercase__ = False def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt _snake_case : Tuple = [ """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """\u0120""", """\u0120l""", """\u0120n""", """\u0120lo""", """\u0120low""", """er""", """\u0120lowest""", """\u0120newer""", """\u0120wider""", """<unk>""", """<|endoftext|>""", ] _snake_case : Tuple = dict(zip(a_, range(len(a_ ) ) ) ) _snake_case : str = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""] _snake_case : List[Any] = {"""unk_token""": """<unk>"""} _snake_case : Optional[int] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["""vocab_file"""] ) _snake_case : Optional[Any] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file, """w""", encoding="""utf-8""" ) as fp: fp.write(json.dumps(a_ ) + """\n""" ) with open(self.merges_file, """w""", encoding="""utf-8""" ) as fp: fp.write("""\n""".join(a_ ) ) def UpperCamelCase_ ( self: Any, **a_: int ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return CodeGenTokenizer.from_pretrained(self.tmpdirname, **a_ ) def UpperCamelCase_ ( self: Any, **a_: str ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return CodeGenTokenizerFast.from_pretrained(self.tmpdirname, **a_ ) def UpperCamelCase_ ( self: Union[str, Any], a_: Dict ): '''simple docstring''' _snake_case : Union[str, Any] = """lower newer""" _snake_case : Tuple = """lower newer""" return input_text, output_text def UpperCamelCase_ ( self: int ): '''simple docstring''' _snake_case : Union[str, Any] = CodeGenTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map ) _snake_case : Optional[Any] = """lower newer""" _snake_case : Optional[int] = ["""\u0120low""", """er""", """\u0120""", """n""", """e""", """w""", """er"""] _snake_case : int = tokenizer.tokenize(a_, add_prefix_space=a_ ) self.assertListEqual(a_, a_ ) _snake_case : str = tokens + [tokenizer.unk_token] _snake_case : Optional[int] = [14, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(a_ ), a_ ) def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' if not self.test_rust_tokenizer: return _snake_case : int = self.get_tokenizer() _snake_case : int = self.get_rust_tokenizer(add_prefix_space=a_ ) _snake_case : Dict = """lower newer""" # Testing tokenization _snake_case : Dict = tokenizer.tokenize(a_, add_prefix_space=a_ ) _snake_case : List[str] = rust_tokenizer.tokenize(a_ ) self.assertListEqual(a_, a_ ) # Testing conversion to ids without special tokens _snake_case : Optional[Any] = tokenizer.encode(a_, add_special_tokens=a_, add_prefix_space=a_ ) _snake_case : Tuple = rust_tokenizer.encode(a_, add_special_tokens=a_ ) self.assertListEqual(a_, a_ ) # Testing conversion to ids with special tokens _snake_case : Tuple = 
self.get_rust_tokenizer(add_prefix_space=a_ ) _snake_case : int = tokenizer.encode(a_, add_prefix_space=a_ ) _snake_case : Optional[Any] = rust_tokenizer.encode(a_ ) self.assertListEqual(a_, a_ ) # Testing the unknown token _snake_case : Tuple = tokens + [rust_tokenizer.unk_token] _snake_case : List[Any] = [14, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(a_ ), a_ ) def UpperCamelCase_ ( self: Dict, *a_: Dict, **a_: int ): '''simple docstring''' pass def UpperCamelCase_ ( self: int, a_: List[Any]=15 ): '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ): _snake_case : List[Any] = self.rust_tokenizer_class.from_pretrained(a_, **a_ ) # Simple input _snake_case : Any = """This is a simple input""" _snake_case : Optional[int] = ["""This is a simple input 1""", """This is a simple input 2"""] _snake_case : Optional[int] = ("""This is a simple input""", """This is a pair""") _snake_case : Optional[Any] = [ ("""This is a simple input 1""", """This is a simple input 2"""), ("""This is a simple pair 1""", """This is a simple pair 2"""), ] # Simple input tests self.assertRaises(a_, tokenizer_r.encode, a_, max_length=a_, padding="""max_length""" ) # Simple input self.assertRaises(a_, tokenizer_r.encode_plus, a_, max_length=a_, padding="""max_length""" ) # Simple input self.assertRaises( a_, tokenizer_r.batch_encode_plus, a_, max_length=a_, padding="""max_length""", ) # Pair input self.assertRaises(a_, tokenizer_r.encode, a_, max_length=a_, padding="""max_length""" ) # Pair input self.assertRaises(a_, tokenizer_r.encode_plus, a_, max_length=a_, padding="""max_length""" ) # Pair input self.assertRaises( a_, tokenizer_r.batch_encode_plus, a_, max_length=a_, padding="""max_length""", ) def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' _snake_case : List[str] = CodeGenTokenizer.from_pretrained(self.tmpdirname, pad_token="""<pad>""" ) # Simple input _snake_case : List[Any] = """This is a simple input""" _snake_case : int = ["""This is a simple input looooooooong""", """This is a simple input"""] _snake_case : Any = ("""This is a simple input""", """This is a pair""") _snake_case : str = [ ("""This is a simple input loooooong""", """This is a simple input"""), ("""This is a simple pair loooooong""", """This is a simple pair"""), ] _snake_case : str = tokenizer.pad_token_id _snake_case : Optional[int] = tokenizer(a_, padding="""max_length""", max_length=30, return_tensors="""np""" ) _snake_case : Dict = tokenizer(a_, padding=a_, truncate=a_, return_tensors="""np""" ) _snake_case : Tuple = tokenizer(*a_, padding="""max_length""", max_length=60, return_tensors="""np""" ) _snake_case : Optional[Any] = tokenizer(a_, padding=a_, truncate=a_, return_tensors="""np""" ) # s # test single string max_length padding self.assertEqual(out_s["""input_ids"""].shape[-1], 30 ) self.assertTrue(pad_token_id in out_s["""input_ids"""] ) self.assertTrue(0 in out_s["""attention_mask"""] ) # s2 # test automatic padding self.assertEqual(out_sa["""input_ids"""].shape[-1], 33 ) # long slice doesn't have padding self.assertFalse(pad_token_id in out_sa["""input_ids"""][0] ) self.assertFalse(0 in out_sa["""attention_mask"""][0] ) # short slice does have padding self.assertTrue(pad_token_id in out_sa["""input_ids"""][1] ) self.assertTrue(0 in out_sa["""attention_mask"""][1] ) # p # test single pair max_length padding self.assertEqual(out_p["""input_ids"""].shape[-1], 60 ) 
self.assertTrue(pad_token_id in out_p["""input_ids"""] ) self.assertTrue(0 in out_p["""attention_mask"""] ) # p2 # test automatic padding pair self.assertEqual(out_pa["""input_ids"""].shape[-1], 52 ) # long slice pair doesn't have padding self.assertFalse(pad_token_id in out_pa["""input_ids"""][0] ) self.assertFalse(0 in out_pa["""attention_mask"""][0] ) # short slice pair does have padding self.assertTrue(pad_token_id in out_pa["""input_ids"""][1] ) self.assertTrue(0 in out_pa["""attention_mask"""][1] ) def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' _snake_case : Tuple = """$$$""" _snake_case : List[Any] = CodeGenTokenizer.from_pretrained(self.tmpdirname, bos_token=a_, add_bos_token=a_ ) _snake_case : str = """This is a simple input""" _snake_case : int = ["""This is a simple input 1""", """This is a simple input 2"""] _snake_case : Union[str, Any] = tokenizer.bos_token_id _snake_case : Tuple = tokenizer(a_ ) _snake_case : Optional[Any] = tokenizer(a_ ) self.assertEqual(out_s.input_ids[0], a_ ) self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) ) _snake_case : Optional[int] = tokenizer.decode(out_s.input_ids ) _snake_case : int = tokenizer.batch_decode(out_sa.input_ids ) self.assertEqual(decode_s.split()[0], a_ ) self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) ) @slow def UpperCamelCase_ ( self: str ): '''simple docstring''' _snake_case : Optional[int] = CodeGenTokenizer.from_pretrained("""Salesforce/codegen-350M-mono""" ) _snake_case : Dict = """\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#""" _snake_case : Union[str, Any] = """\nif len_a > len_b: result = a\nelse: result = b""" _snake_case : Optional[Any] = tokenizer.encode(a_ ) _snake_case : Dict = ["""^#""", re.escape("""<|endoftext|>""" ), """^'''""", """^\"\"\"""", """\n\n\n"""] _snake_case : Optional[Any] = tokenizer.decode(a_, truncate_before_pattern=a_ ) self.assertEqual(a_, a_ ) def UpperCamelCase_ ( self: str ): '''simple docstring''' pass
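# --- Usage sketch (editorial addition) of the truncation behavior tested above:
# --- decode() cuts the text before the first match of any pattern in
# --- truncate_before_pattern. Assumes the Salesforce/codegen-350M-mono tokenizer
# --- files can be downloaded.
from transformers import CodeGenTokenizer

tok = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
ids = tok.encode("def add(a, b):\n    return a + b\n\n\n\n# trailing comment")
# Everything from the run of blank lines onward is dropped at decode time.
print(tok.decode(ids, truncate_before_pattern=["\n\n\n"]))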
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging A_ = logging.get_logger(__name__) A_ = { '''microsoft/markuplm-base''': '''https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json''', '''microsoft/markuplm-large''': '''https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json''', } class lowercase( __a ): '''simple docstring''' lowercase__ = "markuplm" def __init__( self: Optional[Any], a_: List[str]=30_522, a_: List[str]=768, a_: Optional[int]=12, a_: List[str]=12, a_: Optional[int]=3_072, a_: str="gelu", a_: str=0.1, a_: int=0.1, a_: Dict=512, a_: List[Any]=2, a_: List[Any]=0.02, a_: Optional[Any]=1E-12, a_: Any=0, a_: Union[str, Any]=0, a_: int=2, a_: Union[str, Any]=256, a_: Tuple=1_024, a_: str=216, a_: str=1_001, a_: str=32, a_: Optional[Any]=50, a_: List[str]="absolute", a_: Dict=True, a_: int=None, **a_: Any, ): '''simple docstring''' super().__init__( pad_token_id=a_, bos_token_id=a_, eos_token_id=a_, **a_, ) _snake_case : Optional[Any] = vocab_size _snake_case : Optional[Any] = hidden_size _snake_case : Union[str, Any] = num_hidden_layers _snake_case : Tuple = num_attention_heads _snake_case : Optional[Any] = hidden_act _snake_case : List[Any] = intermediate_size _snake_case : List[str] = hidden_dropout_prob _snake_case : int = attention_probs_dropout_prob _snake_case : Tuple = max_position_embeddings _snake_case : str = type_vocab_size _snake_case : Any = initializer_range _snake_case : Any = layer_norm_eps _snake_case : str = position_embedding_type _snake_case : List[Any] = use_cache _snake_case : Any = classifier_dropout # additional properties _snake_case : Dict = max_depth _snake_case : str = max_xpath_tag_unit_embeddings _snake_case : str = max_xpath_subs_unit_embeddings _snake_case : Union[str, Any] = tag_pad_id _snake_case : int = subs_pad_id _snake_case : List[Any] = xpath_unit_hidden_size
"""simple docstring""" import gzip import hashlib import json import multiprocessing import os import re import shutil import time from pathlib import Path import numpy as np from arguments import PreprocessingArguments from datasets import load_dataset from minhash_deduplication import deduplicate_dataset from transformers import AutoTokenizer, HfArgumentParser A_ = re.compile(r'''\s+''') def UpperCAmelCase__ (snake_case__ : Optional[int] ): """simple docstring""" return {"hash": hashlib.mda(re.sub(snake_case__ , """""" , example["""content"""] ).encode("""utf-8""" ) ).hexdigest()} def UpperCAmelCase__ (snake_case__ : Dict ): """simple docstring""" _snake_case : Any = [len(snake_case__ ) for line in example["""content"""].splitlines()] return {"line_mean": np.mean(snake_case__ ), "line_max": max(snake_case__ )} def UpperCAmelCase__ (snake_case__ : List[Any] ): """simple docstring""" _snake_case : Tuple = np.mean([c.isalnum() for c in example["""content"""]] ) return {"alpha_frac": alpha_frac} def UpperCAmelCase__ (snake_case__ : List[Any] , snake_case__ : List[Any] ): """simple docstring""" if example["hash"] in uniques: uniques.remove(example["""hash"""] ) return True else: return False def UpperCAmelCase__ (snake_case__ : Optional[Any] , snake_case__ : List[str]=5 ): """simple docstring""" _snake_case : Any = ["""auto-generated""", """autogenerated""", """automatically generated"""] _snake_case : Tuple = example["""content"""].splitlines() for _, line in zip(range(snake_case__ ) , snake_case__ ): for keyword in keywords: if keyword in line.lower(): return {"autogenerated": True} else: return {"autogenerated": False} def UpperCAmelCase__ (snake_case__ : Any , snake_case__ : Union[str, Any]=5 , snake_case__ : Any=0.05 ): """simple docstring""" _snake_case : Optional[Any] = ["""unit tests""", """test file""", """configuration file"""] _snake_case : List[Any] = example["""content"""].splitlines() _snake_case : Dict = 0 _snake_case : str = 0 # first test for _, line in zip(range(snake_case__ ) , snake_case__ ): for keyword in keywords: if keyword in line.lower(): return {"config_or_test": True} # second test _snake_case : Optional[int] = example["""content"""].count("""\n""" ) _snake_case : Tuple = int(coeff * nlines ) for line in lines: count_config += line.lower().count("""config""" ) count_test += line.lower().count("""test""" ) if count_config > threshold or count_test > threshold: return {"config_or_test": True} return {"config_or_test": False} def UpperCAmelCase__ (snake_case__ : str ): """simple docstring""" _snake_case : Optional[int] = ["""def """, """class """, """for """, """while """] _snake_case : str = example["""content"""].splitlines() for line in lines: for keyword in keywords: if keyword in line.lower(): return {"has_no_keywords": False} return {"has_no_keywords": True} def UpperCAmelCase__ (snake_case__ : List[str] , snake_case__ : List[str]=4 ): """simple docstring""" _snake_case : List[Any] = example["""content"""].splitlines() _snake_case : str = 0 for line in lines: counter += line.lower().count("""=""" ) if counter > minimum: return {"has_few_assignments": False} return {"has_few_assignments": True} def UpperCAmelCase__ (snake_case__ : List[str] ): """simple docstring""" _snake_case : Optional[Any] = tokenizer(example["""content"""] , truncation=snake_case__ )["""input_ids"""] _snake_case : Optional[Any] = len(example["""content"""] ) / len(snake_case__ ) return {"ratio": ratio} def UpperCAmelCase__ (snake_case__ : Optional[int] ): """simple docstring""" _snake_case : 
Optional[int] = {} results.update(get_hash(snake_case__ ) ) results.update(line_stats(snake_case__ ) ) results.update(alpha_stats(snake_case__ ) ) results.update(char_token_ratio(snake_case__ ) ) results.update(is_autogenerated(snake_case__ ) ) results.update(is_config_or_test(snake_case__ ) ) results.update(has_no_keywords(snake_case__ ) ) results.update(has_few_assignments(snake_case__ ) ) return results def UpperCAmelCase__ (snake_case__ : Tuple , snake_case__ : List[Any] , snake_case__ : List[str] ): """simple docstring""" if not check_uniques(snake_case__ , snake_case__ ): return False elif example["autogenerated"]: return False elif example["line_max"] > args.line_max: return False elif example["line_mean"] > args.line_mean: return False elif example["alpha_frac"] < args.alpha_frac: return False elif example["ratio"] < args.min_token_ratio: return False elif example["config_or_test"] and np.random.rand() <= args.filter_proba: return False elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba: return False elif example["has_few_assignments"]: return False else: return True def UpperCAmelCase__ (snake_case__ : Optional[Any] ): """simple docstring""" with open(snake_case__ , """rb""" ) as f_in: with gzip.open(str(snake_case__ ) + """.gz""" , """wb""" , compresslevel=6 ) as f_out: shutil.copyfileobj(snake_case__ , snake_case__ ) os.unlink(snake_case__ ) # Settings A_ = HfArgumentParser(PreprocessingArguments) A_ = parser.parse_args() if args.num_workers is None: A_ = multiprocessing.cpu_count() A_ = AutoTokenizer.from_pretrained(args.tokenizer_dir) # Load dataset A_ = time.time() A_ = load_dataset(args.dataset_name, split='''train''') print(F'''Time to load dataset: {time.time()-t_start:.2f}''') # Run preprocessing A_ = time.time() A_ = ds.map(preprocess, num_proc=args.num_workers) print(F'''Time to preprocess dataset: {time.time()-t_start:.2f}''') # Deduplicate hashes A_ = set(ds.unique('''hash''')) A_ = len(uniques) / len(ds) print(F'''Fraction of duplicates: {1-frac:.2%}''') # Deduplicate data and apply heuristics A_ = time.time() A_ = ds.filter(filter, fn_kwargs={'''uniques''': uniques, '''args''': args}) print(F'''Time to filter dataset: {time.time()-t_start:.2f}''') print(F'''Size of filtered dataset: {len(ds_filter)}''') # Deduplicate with minhash and jaccard similarity if args.near_deduplication: A_ = time.time() A_ , A_ = deduplicate_dataset(ds_filter, args.jaccard_threshold) print(F'''Time to deduplicate dataset: {time.time()-t_start:.2f}''') print(F'''Size of deduplicate dataset: {len(ds_filter)}''') # Save data in batches of samples_per_file A_ = Path(args.output_dir) output_dir.mkdir(exist_ok=True) # save duplicate_clusters in the output_dir as artifacts # not sure it is the right place the save it if args.near_deduplication: with open(output_dir / '''duplicate_clusters.json''', '''w''') as f: json.dump(duplicate_clusters, f) A_ = output_dir / '''data''' data_dir.mkdir(exist_ok=True) A_ = time.time() for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)): A_ = str(data_dir / F'''file-{file_number+1:012}.json''') A_ = min(len(ds_filter), index + args.samples_per_file) ds_filter.select(list(range(index, end_index))).to_json(file_path) compress_file(file_path) print(F'''Time to save dataset: {time.time()-t_start:.2f}''')
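# --- Toy sketch (editorial addition) of the exact-dedup logic above: hash the
# --- whitespace-stripped content, collect the unique hashes, then keep only the
# --- first occurrence of each hash, exactly as check_uniques does. Standalone
# --- re-implementation for illustration only.
import hashlib
import re

WS = re.compile(r"\s+")

def content_hash(text):
    # Whitespace is stripped before hashing, so reformatted copies collide.
    return hashlib.md5(re.sub(WS, "", text).encode("utf-8")).hexdigest()

docs = ["def f():\n    pass", "def f():  \n\tpass", "print(1)"]  # first two collapse to one hash
uniques = {content_hash(d) for d in docs}
kept = []
for d in docs:
    h = content_hash(d)
    if h in uniques:  # first time this hash is seen -> keep the doc, retire the hash
        uniques.remove(h)
        kept.append(d)
print(len(kept))  # 2: the reformatted duplicate is dropped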
"""simple docstring""" import os import re import warnings from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_ta import TaTokenizer else: A_ = None A_ = logging.get_logger(__name__) A_ = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''} A_ = { '''vocab_file''': { '''t5-small''': '''https://huggingface.co/t5-small/resolve/main/spiece.model''', '''t5-base''': '''https://huggingface.co/t5-base/resolve/main/spiece.model''', '''t5-large''': '''https://huggingface.co/t5-large/resolve/main/spiece.model''', '''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/spiece.model''', '''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/spiece.model''', }, '''tokenizer_file''': { '''t5-small''': '''https://huggingface.co/t5-small/resolve/main/tokenizer.json''', '''t5-base''': '''https://huggingface.co/t5-base/resolve/main/tokenizer.json''', '''t5-large''': '''https://huggingface.co/t5-large/resolve/main/tokenizer.json''', '''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/tokenizer.json''', '''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/tokenizer.json''', }, } # TODO(PVP) - this should be removed in Transformers v5 A_ = { '''t5-small''': 5_12, '''t5-base''': 5_12, '''t5-large''': 5_12, '''t5-3b''': 5_12, '''t5-11b''': 5_12, } class lowercase( __a ): '''simple docstring''' lowercase__ = VOCAB_FILES_NAMES lowercase__ = PRETRAINED_VOCAB_FILES_MAP lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase__ = ["input_ids", "attention_mask"] lowercase__ = TaTokenizer lowercase__ = [] def __init__( self: List[Any], a_: Dict=None, a_: str=None, a_: Optional[Any]="</s>", a_: Optional[Any]="<unk>", a_: Any="<pad>", a_: Optional[int]=100, a_: Optional[Any]=None, **a_: Dict, ): '''simple docstring''' if extra_ids > 0 and additional_special_tokens is None: _snake_case : int = [f"<extra_id_{i}>" for i in range(a_ )] elif extra_ids > 0 and additional_special_tokens is not None: # Check that we have the right number of extra special tokens _snake_case : Optional[Any] = len(set(filter(lambda a_ : bool("""extra_id_""" in str(a_ ) ), a_ ) ) ) if extra_tokens != extra_ids: raise ValueError( f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are" """ provided to T5Tokenizer. 
In this case the additional_special_tokens must include the extra_ids""" """ tokens""" ) super().__init__( a_, tokenizer_file=a_, eos_token=a_, unk_token=a_, pad_token=a_, extra_ids=a_, additional_special_tokens=a_, **a_, ) _snake_case : str = vocab_file _snake_case : Dict = False if not self.vocab_file else True _snake_case : Dict = extra_ids @staticmethod def UpperCamelCase_ ( a_: Union[str, Any], a_: List[Any], a_: Optional[int] ): '''simple docstring''' if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes: _snake_case : Union[str, Any] = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path] if init_max_model_length is not None and init_max_model_length != max_model_length: return init_max_model_length elif init_max_model_length is None: warnings.warn( """This tokenizer was incorrectly instantiated with a model max length of""" f" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this" """ behavior is kept to avoid breaking backwards compatibility when padding/encoding with""" """ `truncation is True`.\n- Be aware that you SHOULD NOT rely on""" f" {pretrained_model_name_or_path} automatically truncating your input to" f" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences" f" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with" """ `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please""" """ instantiate this tokenizer with `model_max_length` set to your preferred value.""", a_, ) return max_model_length def UpperCamelCase_ ( self: Dict, a_: str, a_: Optional[str] = None ): '''simple docstring''' if not self.can_save_slow_tokenizer: raise ValueError( """Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """ """tokenizer.""" ) if not os.path.isdir(a_ ): logger.error(f"Vocabulary path ({save_directory}) should be a directory" ) return _snake_case : Optional[Any] = os.path.join( a_, (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(a_ ): copyfile(self.vocab_file, a_ ) logger.info(f"Copy vocab file to {out_vocab_file}" ) return (out_vocab_file,) def UpperCamelCase_ ( self: int, a_: List[int], a_: Optional[List[int]] = None ): '''simple docstring''' _snake_case : str = token_ids_a + [self.eos_token_id] if token_ids_a is None: return self.prefix_tokens + token_ids_a else: _snake_case : str = token_ids_a + [self.eos_token_id] return self.prefix_tokens + token_ids_a + token_ids_a def UpperCamelCase_ ( self: Tuple, a_: List[int], a_: Optional[List[int]] = None ): '''simple docstring''' _snake_case : List[Any] = [self.eos_token_id] if token_ids_a is None: return len(token_ids_a + eos ) * [0] return len(token_ids_a + eos + token_ids_a + eos ) * [0] def UpperCamelCase_ ( self: int ): '''simple docstring''' return list( set(filter(lambda a_ : bool(re.search(r"""<extra_id_\d+>""", a_ ) ) is not None, self.additional_special_tokens ) ) ) def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' return [self.convert_tokens_to_ids(a_ ) for token in self.get_sentinel_tokens()]
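# --- Usage sketch (editorial addition) for the sentinel-token helpers above:
# --- extra_ids=100 adds <extra_id_0> ... <extra_id_99>, the mask tokens used by
# --- T5's span-corruption objective. Assumes the t5-small files are reachable;
# --- the printed id (32099 for t5-small) depends on the checkpoint's vocab size.
from transformers import T5TokenizerFast

tok = T5TokenizerFast.from_pretrained("t5-small")
print(len(tok.get_sentinel_tokens()))             # 100
print(tok.convert_tokens_to_ids("<extra_id_0>"))  # sentinel ids sit at the top of the vocab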
"""simple docstring""" import unittest import numpy as np from diffusers import OnnxStableDiffusionInpaintPipelineLegacy from diffusers.utils.testing_utils import ( is_onnx_available, load_image, load_numpy, nightly, require_onnxruntime, require_torch_gpu, ) if is_onnx_available(): import onnxruntime as ort @nightly @require_onnxruntime @require_torch_gpu class lowercase( unittest.TestCase ): '''simple docstring''' @property def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def UpperCamelCase_ ( self: int ): '''simple docstring''' _snake_case : Any = ort.SessionOptions() _snake_case : Union[str, Any] = False return options def UpperCamelCase_ ( self: List[Any] ): '''simple docstring''' _snake_case : Any = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/in_paint/overture-creations-5sI6fQgYIuo.png""" ) _snake_case : Union[str, Any] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/in_paint/overture-creations-5sI6fQgYIuo_mask.png""" ) _snake_case : Union[str, Any] = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy""" ) # using the PNDM scheduler by default _snake_case : Optional[Any] = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained( """CompVis/stable-diffusion-v1-4""", revision="""onnx""", safety_checker=a_, feature_extractor=a_, provider=self.gpu_provider, sess_options=self.gpu_options, ) pipe.set_progress_bar_config(disable=a_ ) _snake_case : Optional[Any] = """A red cat sitting on a park bench""" _snake_case : Optional[int] = np.random.RandomState(0 ) _snake_case : Any = pipe( prompt=a_, image=a_, mask_image=a_, strength=0.75, guidance_scale=7.5, num_inference_steps=15, generator=a_, output_type="""np""", ) _snake_case : Dict = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image ).max() < 1E-2
64
1
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging A_ = logging.get_logger(__name__) A_ = { '''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/config.json''', '''distilbert-base-uncased-distilled-squad''': ( '''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json''' ), '''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/config.json''', '''distilbert-base-cased-distilled-squad''': ( '''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json''' ), '''distilbert-base-german-cased''': '''https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json''', '''distilbert-base-multilingual-cased''': ( '''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json''' ), '''distilbert-base-uncased-finetuned-sst-2-english''': ( '''https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json''' ), } class lowercase( __a ): '''simple docstring''' lowercase__ = "distilbert" lowercase__ = { "hidden_size": "dim", "num_attention_heads": "n_heads", "num_hidden_layers": "n_layers", } def __init__( self: Any, a_: List[str]=30_522, a_: List[str]=512, a_: List[str]=False, a_: Any=6, a_: Optional[int]=12, a_: List[str]=768, a_: List[Any]=4 * 768, a_: List[str]=0.1, a_: Any=0.1, a_: Optional[int]="gelu", a_: Optional[Any]=0.02, a_: int=0.1, a_: Optional[int]=0.2, a_: Optional[int]=0, **a_: Optional[int], ): '''simple docstring''' _snake_case : int = vocab_size _snake_case : Any = max_position_embeddings _snake_case : Tuple = sinusoidal_pos_embds _snake_case : int = n_layers _snake_case : int = n_heads _snake_case : Tuple = dim _snake_case : Dict = hidden_dim _snake_case : Optional[Any] = dropout _snake_case : Optional[Any] = attention_dropout _snake_case : Optional[int] = activation _snake_case : Dict = initializer_range _snake_case : Optional[Any] = qa_dropout _snake_case : Dict = seq_classif_dropout super().__init__(**a_, pad_token_id=a_ ) class lowercase( __a ): '''simple docstring''' @property def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' if self.task == "multiple-choice": _snake_case : Dict = {0: """batch""", 1: """choice""", 2: """sequence"""} else: _snake_case : Any = {0: """batch""", 1: """sequence"""} return OrderedDict( [ ("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ] )
"""simple docstring""" import argparse import json import os import fairseq import torch from torch import nn from transformers import ( SpeechaTextaConfig, SpeechaTextaForCausalLM, SpeechaTextaTokenizer, SpeechEncoderDecoderConfig, SpeechEncoderDecoderModel, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaModel, logging, ) logging.set_verbosity_info() A_ = logging.get_logger(__name__) A_ = { '''post_extract_proj''': '''feature_projection.projection''', '''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''', '''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''', '''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''', '''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''', '''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''', '''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''', '''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''', '''fc2''': '''encoder.layers.*.feed_forward.output_dense''', '''final_layer_norm''': '''encoder.layers.*.final_layer_norm''', '''encoder.layer_norm''': '''encoder.layer_norm''', '''w2v_model.layer_norm''': '''feature_projection.layer_norm''', '''quantizer.weight_proj''': '''quantizer.weight_proj''', '''quantizer.vars''': '''quantizer.codevectors''', '''project_q''': '''project_q''', '''final_proj''': '''project_hid''', '''w2v_encoder.proj''': '''lm_head''', '''mask_emb''': '''masked_spec_embed''', } A_ = [ '''lm_head''', '''quantizer.weight_proj''', '''quantizer.codevectors''', '''project_q''', '''project_hid''', ] def UpperCAmelCase__ (snake_case__ : str , snake_case__ : Dict , snake_case__ : Any , snake_case__ : str , snake_case__ : str ): """simple docstring""" for attribute in key.split(""".""" ): _snake_case : Optional[Any] = getattr(snake_case__ , snake_case__ ) if weight_type is not None: _snake_case : Optional[Any] = getattr(snake_case__ , snake_case__ ).shape else: _snake_case : Optional[Any] = hf_pointer.shape assert hf_shape == value.shape, ( F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be" F" {value.shape} for {full_name}" ) if weight_type == "weight": _snake_case : int = value elif weight_type == "weight_g": _snake_case : str = value elif weight_type == "weight_v": _snake_case : Tuple = value elif weight_type == "bias": _snake_case : List[str] = value else: _snake_case : int = value logger.info(F"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." 
) def UpperCAmelCase__ (snake_case__ : str , snake_case__ : List[str] ): """simple docstring""" _snake_case : List[Any] = [] _snake_case : Optional[Any] = fairseq_model.state_dict() _snake_case : str = hf_model.feature_extractor # if encoder has different dim to decoder -> use proj_weight _snake_case : Optional[Any] = None for name, value in fairseq_dict.items(): _snake_case : Optional[Any] = False if "conv_layers" in name: load_conv_layer( snake_case__ , snake_case__ , snake_case__ , snake_case__ , hf_model.config.feat_extract_norm == """group""" , ) _snake_case : Dict = True elif name.split(""".""" )[0] == "proj": _snake_case : Dict = fairseq_model.proj _snake_case : Optional[int] = True else: for key, mapped_key in MAPPING.items(): if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]: _snake_case : Dict = True if "*" in mapped_key: _snake_case : Optional[int] = name.split(snake_case__ )[0].split(""".""" )[-2] _snake_case : Union[str, Any] = mapped_key.replace("""*""" , snake_case__ ) if "weight_g" in name: _snake_case : str = """weight_g""" elif "weight_v" in name: _snake_case : Optional[Any] = """weight_v""" elif "bias" in name: _snake_case : Union[str, Any] = """bias""" elif "weight" in name: _snake_case : int = """weight""" else: _snake_case : Optional[int] = None set_recursively(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) continue if not is_used: unused_weights.append(snake_case__ ) logger.warning(F"Unused weights: {unused_weights}" ) return proj_weight def UpperCAmelCase__ (snake_case__ : Any , snake_case__ : Dict , snake_case__ : Union[str, Any] , snake_case__ : Union[str, Any] , snake_case__ : int ): """simple docstring""" _snake_case : Any = full_name.split("""conv_layers.""" )[-1] _snake_case : Optional[int] = name.split(""".""" ) _snake_case : List[str] = int(items[0] ) _snake_case : Dict = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F"{full_name} has size {value.shape}, but" F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." ) _snake_case : Tuple = value logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F"{full_name} has size {value.shape}, but" F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." ) _snake_case : List[Any] = value logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was" " found." ) _snake_case : int = value logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F"{full_name} has size {value.shape}, but" F" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found." ) _snake_case : List[str] = value logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." 
) else: unused_weights.append(snake_case__ ) def UpperCAmelCase__ (snake_case__ : Union[str, Any] ): """simple docstring""" _snake_case , _snake_case : Optional[Any] = emb.weight.shape _snake_case : Optional[int] = nn.Linear(snake_case__ , snake_case__ , bias=snake_case__ ) _snake_case : Union[str, Any] = emb.weight.data return lin_layer def UpperCAmelCase__ (snake_case__ : List[Any] ): """simple docstring""" with open(snake_case__ , """r""" , encoding="""utf-8""" ) as f: _snake_case : Any = f.readlines() _snake_case : Optional[Any] = [line.split(""" """ )[0] for line in lines] _snake_case : str = len(snake_case__ ) _snake_case : Tuple = { """<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3, } vocab_dict.update(dict(zip(snake_case__ , range(4 , num_words + 4 ) ) ) ) return vocab_dict @torch.no_grad() def UpperCAmelCase__ (snake_case__ : int , snake_case__ : List[str] , snake_case__ : int , snake_case__ : Dict , snake_case__ : List[Any] , snake_case__ : str , snake_case__ : Union[str, Any] , ): """simple docstring""" _snake_case : Optional[int] = WavaVecaConfig.from_pretrained(snake_case__ ) _snake_case : List[str] = SpeechaTextaConfig.from_pretrained( snake_case__ , vocab_size=snake_case__ , decoder_layers=snake_case__ , do_stable_layer_norm=snake_case__ ) _snake_case : Dict = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=snake_case__ , return_attention_mask=snake_case__ , ) _snake_case , _snake_case , _snake_case : Optional[int] = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} ) _snake_case : Optional[Any] = model[0].eval() # set weights for wav2vec2 encoder _snake_case : Any = WavaVecaModel(snake_case__ ) _snake_case : Optional[Any] = recursively_load_weights_wavaveca(model.encoder , snake_case__ ) _snake_case : Optional[Any] = SpeechaTextaForCausalLM(snake_case__ ) _snake_case , _snake_case : List[str] = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=snake_case__ ) # set output linear layer unexpected_keys.remove("""embed_out""" ) _snake_case : Any = nn.Parameter(model.decoder.embed_out.detach() ) # layer norm is init to identity matrix so leaving it is fine logger.warning(F"The following keys are missing when loading the decoder weights: {missing_keys}" ) logger.warning(F"The following keys are unexpected when loading the decoder weights: {unexpected_keys}" ) _snake_case : Any = SpeechEncoderDecoderModel(encoder=snake_case__ , decoder=snake_case__ ) _snake_case : Any = False # add projection layer _snake_case : int = nn.Parameter(projection_layer.weight ) _snake_case : Any = nn.Parameter(projection_layer.bias ) _snake_case : Any = create_vocab_dict(snake_case__ ) with open(os.path.join(snake_case__ , """vocab.json""" ) , """w""" ) as fp: json.dump(snake_case__ , snake_case__ ) _snake_case : Dict = SpeechaTextaTokenizer(os.path.join(snake_case__ , """vocab.json""" ) ) tokenizer.save_pretrained(snake_case__ ) _snake_case : str = hf_wavavec.config.to_dict() _snake_case : List[str] = tokenizer.pad_token_id _snake_case : Union[str, Any] = tokenizer.bos_token_id _snake_case : Union[str, Any] = tokenizer.eos_token_id _snake_case : Optional[Any] = """speech_to_text_2""" _snake_case : Optional[int] = """wav2vec2""" _snake_case : Tuple = SpeechEncoderDecoderConfig.from_dict(snake_case__ ) hf_wavavec.save_pretrained(snake_case__ ) feature_extractor.save_pretrained(snake_case__ ) if __name__ == "__main__": A_ 
= argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''') parser.add_argument( '''--encoder_config_path''', default='''facebook/wav2vec2-large-lv60''', type=str, help='''Path to hf encoder wav2vec2 checkpoint config''', ) parser.add_argument( '''--decoder_config_path''', default='''facebook/s2t-small-mustc-en-fr-st''', type=str, help='''Path to hf decoder s2t checkpoint config''', ) parser.add_argument('''--vocab_size''', default=1_02_24, type=int, help='''Vocab size of decoder''') parser.add_argument('''--num_decoder_layers''', default=7, type=int, help='''Number of decoder layers''') A_ = parser.parse_args() convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.dict_path, encoder_config_path=args.encoder_config_path, decoder_config_path=args.decoder_config_path, vocab_size=args.vocab_size, num_decoder_layers=args.num_decoder_layers, )
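# --- Invocation sketch (editorial addition): the converter above is normally run
# --- as a CLI script; called from Python it would look like the commented block
# --- below. The checkpoint/dict paths are placeholders and fairseq must be
# --- installed -- nothing is downloaded or converted automatically here.
# convert_wav2vec2_checkpoint(
#     checkpoint_path="/path/to/fairseq/checkpoint.pt",     # placeholder
#     pytorch_dump_folder_path="./s2t-wav2vec2-converted",  # placeholder
#     dict_path="/path/to/fairseq/dict.txt",                # placeholder
#     encoder_config_path="facebook/wav2vec2-large-lv60",
#     decoder_config_path="facebook/s2t-small-mustc-en-fr-st",
#     vocab_size=10224,
#     num_decoder_layers=7,
# )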
"""simple docstring""" import json import os import re import unittest from transformers import CodeGenTokenizer, CodeGenTokenizerFast from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class lowercase( __a , unittest.TestCase ): '''simple docstring''' lowercase__ = CodeGenTokenizer lowercase__ = CodeGenTokenizerFast lowercase__ = True lowercase__ = {"add_prefix_space": True} lowercase__ = False def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt _snake_case : Tuple = [ """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """\u0120""", """\u0120l""", """\u0120n""", """\u0120lo""", """\u0120low""", """er""", """\u0120lowest""", """\u0120newer""", """\u0120wider""", """<unk>""", """<|endoftext|>""", ] _snake_case : Tuple = dict(zip(a_, range(len(a_ ) ) ) ) _snake_case : str = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""] _snake_case : List[Any] = {"""unk_token""": """<unk>"""} _snake_case : Optional[int] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["""vocab_file"""] ) _snake_case : Optional[Any] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file, """w""", encoding="""utf-8""" ) as fp: fp.write(json.dumps(a_ ) + """\n""" ) with open(self.merges_file, """w""", encoding="""utf-8""" ) as fp: fp.write("""\n""".join(a_ ) ) def UpperCamelCase_ ( self: Any, **a_: int ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return CodeGenTokenizer.from_pretrained(self.tmpdirname, **a_ ) def UpperCamelCase_ ( self: Any, **a_: str ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return CodeGenTokenizerFast.from_pretrained(self.tmpdirname, **a_ ) def UpperCamelCase_ ( self: Union[str, Any], a_: Dict ): '''simple docstring''' _snake_case : Union[str, Any] = """lower newer""" _snake_case : Tuple = """lower newer""" return input_text, output_text def UpperCamelCase_ ( self: int ): '''simple docstring''' _snake_case : Union[str, Any] = CodeGenTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map ) _snake_case : Optional[Any] = """lower newer""" _snake_case : Optional[int] = ["""\u0120low""", """er""", """\u0120""", """n""", """e""", """w""", """er"""] _snake_case : int = tokenizer.tokenize(a_, add_prefix_space=a_ ) self.assertListEqual(a_, a_ ) _snake_case : str = tokens + [tokenizer.unk_token] _snake_case : Optional[int] = [14, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(a_ ), a_ ) def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' if not self.test_rust_tokenizer: return _snake_case : int = self.get_tokenizer() _snake_case : int = self.get_rust_tokenizer(add_prefix_space=a_ ) _snake_case : Dict = """lower newer""" # Testing tokenization _snake_case : Dict = tokenizer.tokenize(a_, add_prefix_space=a_ ) _snake_case : List[str] = rust_tokenizer.tokenize(a_ ) self.assertListEqual(a_, a_ ) # Testing conversion to ids without special tokens _snake_case : Optional[Any] = tokenizer.encode(a_, add_special_tokens=a_, add_prefix_space=a_ ) _snake_case : Tuple = rust_tokenizer.encode(a_, add_special_tokens=a_ ) self.assertListEqual(a_, a_ ) # Testing conversion to ids with special tokens _snake_case : Tuple = 
self.get_rust_tokenizer(add_prefix_space=a_ ) _snake_case : int = tokenizer.encode(a_, add_prefix_space=a_ ) _snake_case : Optional[Any] = rust_tokenizer.encode(a_ ) self.assertListEqual(a_, a_ ) # Testing the unknown token _snake_case : Tuple = tokens + [rust_tokenizer.unk_token] _snake_case : List[Any] = [14, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(a_ ), a_ ) def UpperCamelCase_ ( self: Dict, *a_: Dict, **a_: int ): '''simple docstring''' pass def UpperCamelCase_ ( self: int, a_: List[Any]=15 ): '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ): _snake_case : List[Any] = self.rust_tokenizer_class.from_pretrained(a_, **a_ ) # Simple input _snake_case : Any = """This is a simple input""" _snake_case : Optional[int] = ["""This is a simple input 1""", """This is a simple input 2"""] _snake_case : Optional[int] = ("""This is a simple input""", """This is a pair""") _snake_case : Optional[Any] = [ ("""This is a simple input 1""", """This is a simple input 2"""), ("""This is a simple pair 1""", """This is a simple pair 2"""), ] # Simple input tests self.assertRaises(a_, tokenizer_r.encode, a_, max_length=a_, padding="""max_length""" ) # Simple input self.assertRaises(a_, tokenizer_r.encode_plus, a_, max_length=a_, padding="""max_length""" ) # Simple input self.assertRaises( a_, tokenizer_r.batch_encode_plus, a_, max_length=a_, padding="""max_length""", ) # Pair input self.assertRaises(a_, tokenizer_r.encode, a_, max_length=a_, padding="""max_length""" ) # Pair input self.assertRaises(a_, tokenizer_r.encode_plus, a_, max_length=a_, padding="""max_length""" ) # Pair input self.assertRaises( a_, tokenizer_r.batch_encode_plus, a_, max_length=a_, padding="""max_length""", ) def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' _snake_case : List[str] = CodeGenTokenizer.from_pretrained(self.tmpdirname, pad_token="""<pad>""" ) # Simple input _snake_case : List[Any] = """This is a simple input""" _snake_case : int = ["""This is a simple input looooooooong""", """This is a simple input"""] _snake_case : Any = ("""This is a simple input""", """This is a pair""") _snake_case : str = [ ("""This is a simple input loooooong""", """This is a simple input"""), ("""This is a simple pair loooooong""", """This is a simple pair"""), ] _snake_case : str = tokenizer.pad_token_id _snake_case : Optional[int] = tokenizer(a_, padding="""max_length""", max_length=30, return_tensors="""np""" ) _snake_case : Dict = tokenizer(a_, padding=a_, truncate=a_, return_tensors="""np""" ) _snake_case : Tuple = tokenizer(*a_, padding="""max_length""", max_length=60, return_tensors="""np""" ) _snake_case : Optional[Any] = tokenizer(a_, padding=a_, truncate=a_, return_tensors="""np""" ) # s # test single string max_length padding self.assertEqual(out_s["""input_ids"""].shape[-1], 30 ) self.assertTrue(pad_token_id in out_s["""input_ids"""] ) self.assertTrue(0 in out_s["""attention_mask"""] ) # s2 # test automatic padding self.assertEqual(out_sa["""input_ids"""].shape[-1], 33 ) # long slice doesn't have padding self.assertFalse(pad_token_id in out_sa["""input_ids"""][0] ) self.assertFalse(0 in out_sa["""attention_mask"""][0] ) # short slice does have padding self.assertTrue(pad_token_id in out_sa["""input_ids"""][1] ) self.assertTrue(0 in out_sa["""attention_mask"""][1] ) # p # test single pair max_length padding self.assertEqual(out_p["""input_ids"""].shape[-1], 60 ) 
self.assertTrue(pad_token_id in out_p["""input_ids"""] ) self.assertTrue(0 in out_p["""attention_mask"""] ) # p2 # test automatic padding pair self.assertEqual(out_pa["""input_ids"""].shape[-1], 52 ) # long slice pair doesn't have padding self.assertFalse(pad_token_id in out_pa["""input_ids"""][0] ) self.assertFalse(0 in out_pa["""attention_mask"""][0] ) # short slice pair does have padding self.assertTrue(pad_token_id in out_pa["""input_ids"""][1] ) self.assertTrue(0 in out_pa["""attention_mask"""][1] ) def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' _snake_case : Tuple = """$$$""" _snake_case : List[Any] = CodeGenTokenizer.from_pretrained(self.tmpdirname, bos_token=a_, add_bos_token=a_ ) _snake_case : str = """This is a simple input""" _snake_case : int = ["""This is a simple input 1""", """This is a simple input 2"""] _snake_case : Union[str, Any] = tokenizer.bos_token_id _snake_case : Tuple = tokenizer(a_ ) _snake_case : Optional[Any] = tokenizer(a_ ) self.assertEqual(out_s.input_ids[0], a_ ) self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) ) _snake_case : Optional[int] = tokenizer.decode(out_s.input_ids ) _snake_case : int = tokenizer.batch_decode(out_sa.input_ids ) self.assertEqual(decode_s.split()[0], a_ ) self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) ) @slow def UpperCamelCase_ ( self: str ): '''simple docstring''' _snake_case : Optional[int] = CodeGenTokenizer.from_pretrained("""Salesforce/codegen-350M-mono""" ) _snake_case : Dict = """\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#""" _snake_case : Union[str, Any] = """\nif len_a > len_b: result = a\nelse: result = b""" _snake_case : Optional[Any] = tokenizer.encode(a_ ) _snake_case : Dict = ["""^#""", re.escape("""<|endoftext|>""" ), """^'''""", """^\"\"\"""", """\n\n\n"""] _snake_case : Optional[Any] = tokenizer.decode(a_, truncate_before_pattern=a_ ) self.assertEqual(a_, a_ ) def UpperCamelCase_ ( self: str ): '''simple docstring''' pass
64
"""simple docstring""" import argparse import os # New Code # import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils import find_executable_batch_size ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to ensure out-of-memory errors never # interrupt training, and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## A_ = 16 A_ = 32 def UpperCAmelCase__ (snake_case__ : Accelerator , snake_case__ : int = 16 ): """simple docstring""" _snake_case : Optional[Any] = AutoTokenizer.from_pretrained("""bert-base-cased""" ) _snake_case : Any = load_dataset("""glue""" , """mrpc""" ) def tokenize_function(snake_case__ : Any ): # max_length=None => use the model max length (it's actually the default) _snake_case : Any = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=snake_case__ , max_length=snake_case__ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): _snake_case : List[Any] = datasets.map( snake_case__ , batched=snake_case__ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library _snake_case : int = tokenized_datasets.rename_column("""label""" , """labels""" ) def collate_fn(snake_case__ : int ): # On TPU it's best to pad everything to the same length or training will be very slow. _snake_case : Optional[int] = 1_28 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": _snake_case : str = 16 elif accelerator.mixed_precision != "no": _snake_case : Optional[int] = 8 else: _snake_case : Optional[int] = None return tokenizer.pad( snake_case__ , padding="""longest""" , max_length=snake_case__ , pad_to_multiple_of=snake_case__ , return_tensors="""pt""" , ) # Instantiate dataloaders. 
_snake_case : Optional[int] = DataLoader( tokenized_datasets["""train"""] , shuffle=snake_case__ , collate_fn=snake_case__ , batch_size=snake_case__ ) _snake_case : Dict = DataLoader( tokenized_datasets["""validation"""] , shuffle=snake_case__ , collate_fn=snake_case__ , batch_size=snake_case__ ) return train_dataloader, eval_dataloader # For testing only if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1": from accelerate.test_utils.training import mocked_dataloaders A_ = mocked_dataloaders # noqa: F811 def UpperCAmelCase__ (snake_case__ : List[Any] , snake_case__ : Any ): """simple docstring""" if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , snake_case__ ) == "1": _snake_case : List[Any] = 2 # Initialize accelerator _snake_case : str = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs _snake_case : Tuple = config["""lr"""] _snake_case : str = int(config["""num_epochs"""] ) _snake_case : Union[str, Any] = int(config["""seed"""] ) _snake_case : Union[str, Any] = int(config["""batch_size"""] ) _snake_case : List[str] = evaluate.load("""glue""" , """mrpc""" ) # New Code # # We now can define an inner training loop function. It should take a batch size as the only parameter, # and build the dataloaders in there. # It also gets our decorator @find_executable_batch_size(starting_batch_size=snake_case__ ) def inner_training_loop(snake_case__ : Union[str, Any] ): # And now just move everything below under this function # We need to bring in the Accelerator object from earlier nonlocal accelerator # And reset all of its attributes that could hold onto any memory: accelerator.free_memory() # Then we can declare the model, optimizer, and everything else: set_seed(snake_case__ ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) _snake_case : List[Any] = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=snake_case__ ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). _snake_case : Tuple = model.to(accelerator.device ) # Instantiate optimizer _snake_case : str = AdamW(params=model.parameters() , lr=snake_case__ ) _snake_case , _snake_case : Optional[int] = get_dataloaders(snake_case__ , snake_case__ ) # Instantiate scheduler _snake_case : str = get_linear_schedule_with_warmup( optimizer=snake_case__ , num_warmup_steps=1_00 , num_training_steps=(len(snake_case__ ) * num_epochs) , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. _snake_case , _snake_case , _snake_case , _snake_case , _snake_case : List[str] = accelerator.prepare( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) # Now we train the model for epoch in range(snake_case__ ): model.train() for step, batch in enumerate(snake_case__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) _snake_case : int = model(**snake_case__ ) _snake_case : str = outputs.loss accelerator.backward(snake_case__ ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(snake_case__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): _snake_case : int = model(**snake_case__ ) _snake_case : Optional[Any] = outputs.logits.argmax(dim=-1 ) _snake_case , _snake_case : Tuple = accelerator.gather_for_metrics((predictions, batch["""labels"""]) ) metric.add_batch( predictions=snake_case__ , references=snake_case__ , ) _snake_case : str = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F"epoch {epoch}:" , snake_case__ ) # New Code # # And call it at the end with no arguments # Note: You could also refactor this outside of your training loop function inner_training_loop() def UpperCAmelCase__ (): """simple docstring""" _snake_case : Any = argparse.ArgumentParser(description="""Simple example of training script.""" ) parser.add_argument( """--mixed_precision""" , type=snake_case__ , default=snake_case__ , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose""" """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.""" """and an Nvidia Ampere GPU.""" , ) parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" ) _snake_case : Dict = parser.parse_args() _snake_case : int = {"""lr""": 2e-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16} training_function(snake_case__ , snake_case__ ) if __name__ == "__main__": main()
64
1
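# Hedged sketch (illustrative, not one of the dataset rows): the Accelerate
# example in the row above is built on `find_executable_batch_size`. Decorate
# an inner training function whose only argument is the batch size, and the
# decorator retries with a halved batch size after every CUDA OOM.
from accelerate import Accelerator
from accelerate.utils import find_executable_batch_size

accelerator = Accelerator()

@find_executable_batch_size(starting_batch_size=128)
def inner_training_loop(batch_size):
    accelerator.free_memory()  # release tensors held over from a failed attempt
    print(f"trying batch_size={batch_size}")
    # ... build the dataloaders/model with `batch_size` and run the usual loop ...

inner_training_loop()  # called with no arguments; the decorator injects batch_size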
"""simple docstring""" import pytest import datasets # Import fixture modules as plugins A_ = ['''tests.fixtures.files''', '''tests.fixtures.hub''', '''tests.fixtures.fsspec'''] def UpperCAmelCase__ (snake_case__ : Optional[int] , snake_case__ : List[str] ): """simple docstring""" for item in items: if any(marker in item.keywords for marker in ["""integration""", """unit"""] ): continue item.add_marker(pytest.mark.unit ) def UpperCAmelCase__ (snake_case__ : int ): """simple docstring""" config.addinivalue_line("""markers""" , """torchaudio_latest: mark test to run with torchaudio>=0.12""" ) @pytest.fixture(autouse=snake_case__ ) def UpperCAmelCase__ (snake_case__ : Optional[int] , snake_case__ : Optional[int] ): """simple docstring""" _snake_case : Dict = tmp_path_factory.getbasetemp() / """cache""" _snake_case : List[str] = test_hf_cache_home / """datasets""" _snake_case : str = test_hf_cache_home / """metrics""" _snake_case : List[str] = test_hf_cache_home / """modules""" monkeypatch.setattr("""datasets.config.HF_DATASETS_CACHE""" , str(snake_case__ ) ) monkeypatch.setattr("""datasets.config.HF_METRICS_CACHE""" , str(snake_case__ ) ) monkeypatch.setattr("""datasets.config.HF_MODULES_CACHE""" , str(snake_case__ ) ) _snake_case : List[str] = test_hf_datasets_cache / """downloads""" monkeypatch.setattr("""datasets.config.DOWNLOADED_DATASETS_PATH""" , str(snake_case__ ) ) _snake_case : Dict = test_hf_datasets_cache / """downloads""" / """extracted""" monkeypatch.setattr("""datasets.config.EXTRACTED_DATASETS_PATH""" , str(snake_case__ ) ) @pytest.fixture(autouse=snake_case__ , scope="""session""" ) def UpperCAmelCase__ (): """simple docstring""" datasets.disable_progress_bar() @pytest.fixture(autouse=snake_case__ ) def UpperCAmelCase__ (snake_case__ : Tuple ): """simple docstring""" monkeypatch.setattr("""datasets.config.HF_UPDATE_DOWNLOAD_COUNTS""" , snake_case__ ) @pytest.fixture def UpperCAmelCase__ (snake_case__ : int ): """simple docstring""" monkeypatch.setattr("""sqlalchemy.util.deprecations.SILENCE_UBER_WARNING""" , snake_case__ )
64
"""simple docstring""" import os import zipfile import requests from get_ci_error_statistics import download_artifact, get_artifacts_links def UpperCAmelCase__ (snake_case__ : Optional[int] , snake_case__ : Any=7 ): """simple docstring""" _snake_case : Any = None if token is not None: _snake_case : Any = {"""Accept""": """application/vnd.github+json""", """Authorization""": F"Bearer {token}"} # The id of a workflow (not of a workflow run) _snake_case : List[str] = """636036""" _snake_case : Union[str, Any] = F"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs" # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results url += F"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}" _snake_case : str = requests.get(snake_case__ , headers=snake_case__ ).json() return result["workflow_runs"] def UpperCAmelCase__ (snake_case__ : Optional[Any] ): """simple docstring""" _snake_case : str = get_daily_ci_runs(snake_case__ ) _snake_case : str = None for workflow_run in workflow_runs: if workflow_run["status"] == "completed": _snake_case : List[str] = workflow_run["""id"""] break return workflow_run_id def UpperCAmelCase__ (snake_case__ : str , snake_case__ : Union[str, Any] , snake_case__ : Optional[int] ): """simple docstring""" _snake_case : Optional[Any] = get_last_daily_ci_runs(snake_case__ ) if workflow_run_id is not None: _snake_case : Optional[Any] = get_artifacts_links(worflow_run_id=snake_case__ , token=snake_case__ ) for artifact_name in artifact_names: if artifact_name in artifacts_links: _snake_case : Optional[int] = artifacts_links[artifact_name] download_artifact( artifact_name=snake_case__ , artifact_url=snake_case__ , output_dir=snake_case__ , token=snake_case__ ) def UpperCAmelCase__ (snake_case__ : Union[str, Any] , snake_case__ : List[str] , snake_case__ : int ): """simple docstring""" get_last_daily_ci_artifacts(snake_case__ , snake_case__ , snake_case__ ) _snake_case : int = {} for artifact_name in artifact_names: _snake_case : int = os.path.join(snake_case__ , F"{artifact_name}.zip" ) if os.path.isfile(snake_case__ ): _snake_case : Tuple = {} with zipfile.ZipFile(snake_case__ ) as z: for filename in z.namelist(): if not os.path.isdir(snake_case__ ): # read the file with z.open(snake_case__ ) as f: _snake_case : Any = f.read().decode("""UTF-8""" ) return results
64
1
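# Hedged sketch (illustrative, not one of the dataset rows): the conftest.py
# row above isolates the Hugging Face caches per test session by monkeypatching
# the library's config paths to a pytest-managed temp directory.
import pytest

@pytest.fixture(autouse=True)
def isolated_hf_cache(tmp_path_factory, monkeypatch):
    cache = tmp_path_factory.getbasetemp() / "cache"
    monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE", str(cache / "datasets"))
    monkeypatch.setattr("datasets.config.HF_MODULES_CACHE", str(cache / "modules"))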
"""simple docstring""" import logging from dataclasses import dataclass, field from pathlib import Path from typing import Optional, Union from .generation.configuration_utils import GenerationConfig from .training_args import TrainingArguments from .utils import add_start_docstrings A_ = logging.getLogger(__name__) @dataclass @add_start_docstrings(TrainingArguments.__doc__ ) class lowercase( __a ): '''simple docstring''' lowercase__ = field(default=__a , metadata={"help": "Whether to use SortishSampler or not."} ) lowercase__ = field( default=__a , metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} ) lowercase__ = field( default=__a , metadata={ "help": ( "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default " "to the `max_length` value of the model configuration." ) } , ) lowercase__ = field( default=__a , metadata={ "help": ( "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default " "to the `num_beams` value of the model configuration." ) } , ) lowercase__ = field( default=__a , metadata={ "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction." } , ) def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' _snake_case : Tuple = super().to_dict() for k, v in d.items(): if isinstance(a_, a_ ): _snake_case : int = v.to_dict() return d
64
"""simple docstring""" from .integrations import ( is_optuna_available, is_ray_available, is_sigopt_available, is_wandb_available, run_hp_search_optuna, run_hp_search_ray, run_hp_search_sigopt, run_hp_search_wandb, ) from .trainer_utils import ( HPSearchBackend, default_hp_space_optuna, default_hp_space_ray, default_hp_space_sigopt, default_hp_space_wandb, ) from .utils import logging A_ = logging.get_logger(__name__) class lowercase: '''simple docstring''' lowercase__ = 42 lowercase__ = None @staticmethod def UpperCamelCase_ ( ): '''simple docstring''' raise NotImplementedError def UpperCamelCase_ ( self: Tuple, a_: int, a_: int, a_: str, **a_: Dict ): '''simple docstring''' raise NotImplementedError def UpperCamelCase_ ( self: Union[str, Any], a_: List[str] ): '''simple docstring''' raise NotImplementedError def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' if not self.is_available(): raise RuntimeError( f"You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}." ) @classmethod def UpperCamelCase_ ( cls: Tuple ): '''simple docstring''' return f"`pip install {cls.pip_package or cls.name}`" class lowercase( __a ): '''simple docstring''' lowercase__ = "optuna" @staticmethod def UpperCamelCase_ ( ): '''simple docstring''' return is_optuna_available() def UpperCamelCase_ ( self: Union[str, Any], a_: List[Any], a_: int, a_: str, **a_: List[str] ): '''simple docstring''' return run_hp_search_optuna(a_, a_, a_, **a_ ) def UpperCamelCase_ ( self: Optional[Any], a_: Any ): '''simple docstring''' return default_hp_space_optuna(a_ ) class lowercase( __a ): '''simple docstring''' lowercase__ = "ray" lowercase__ = "'ray[tune]'" @staticmethod def UpperCamelCase_ ( ): '''simple docstring''' return is_ray_available() def UpperCamelCase_ ( self: int, a_: Optional[Any], a_: int, a_: str, **a_: List[Any] ): '''simple docstring''' return run_hp_search_ray(a_, a_, a_, **a_ ) def UpperCamelCase_ ( self: str, a_: Tuple ): '''simple docstring''' return default_hp_space_ray(a_ ) class lowercase( __a ): '''simple docstring''' lowercase__ = "sigopt" @staticmethod def UpperCamelCase_ ( ): '''simple docstring''' return is_sigopt_available() def UpperCamelCase_ ( self: Dict, a_: str, a_: int, a_: str, **a_: int ): '''simple docstring''' return run_hp_search_sigopt(a_, a_, a_, **a_ ) def UpperCamelCase_ ( self: str, a_: List[str] ): '''simple docstring''' return default_hp_space_sigopt(a_ ) class lowercase( __a ): '''simple docstring''' lowercase__ = "wandb" @staticmethod def UpperCamelCase_ ( ): '''simple docstring''' return is_wandb_available() def UpperCamelCase_ ( self: Optional[Any], a_: str, a_: int, a_: str, **a_: Union[str, Any] ): '''simple docstring''' return run_hp_search_wandb(a_, a_, a_, **a_ ) def UpperCamelCase_ ( self: str, a_: Any ): '''simple docstring''' return default_hp_space_wandb(a_ ) A_ = { HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend] } def UpperCAmelCase__ (): """simple docstring""" _snake_case : Optional[int] = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()] if len(snake_case__ ) > 0: _snake_case : Any = available_backends[0].name if len(snake_case__ ) > 1: logger.info( F"{len(snake_case__ )} hyperparameter search backends available. Using {name} as the default." 
) return name raise RuntimeError( """No hyperparameter search backend available.\n""" + """\n""".join( F" - To install {backend.name} run {backend.pip_install()}" for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
64
1
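# Hedged sketch (illustrative, not one of the dataset rows): the
# hyperparameter-search module above keeps a registry of backend classes, each
# reporting its own availability; the first installed backend becomes the
# default. Names here are illustrative, not the transformers API.
class OptunaBackend:
    name = "optuna"

    @staticmethod
    def is_available():
        try:
            import optuna  # noqa: F401
            return True
        except ImportError:
            return False

def pick_default_backend(backends):
    available = [b for b in backends if b.is_available()]
    if not available:
        raise RuntimeError("No hyperparameter search backend available.")
    return available[0].name

# pick_default_backend([OptunaBackend]) -> "optuna" when optuna is installed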
"""simple docstring""" import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class lowercase( __a ): '''simple docstring''' lowercase__ = ["image_processor", "tokenizer"] lowercase__ = "ViTImageProcessor" lowercase__ = ("CLIPTokenizer", "CLIPTokenizerFast") def __init__( self: str, a_: Optional[int]=None, a_: Dict=None, **a_: Optional[Any] ): '''simple docstring''' _snake_case : List[Any] = None if "feature_extractor" in kwargs: warnings.warn( """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`""" """ instead.""", a_, ) _snake_case : Tuple = kwargs.pop("""feature_extractor""" ) _snake_case : Optional[int] = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("""You need to specify an `image_processor`.""" ) if tokenizer is None: raise ValueError("""You need to specify a `tokenizer`.""" ) super().__init__(a_, a_ ) def __call__( self: List[str], a_: Union[str, Any]=None, a_: Optional[int]=None, a_: List[str]=None, a_: int=None, **a_: Dict ): '''simple docstring''' if text is None and visual_prompt is None and images is None: raise ValueError("""You have to specify either text, visual prompt or images.""" ) if text is not None and visual_prompt is not None: raise ValueError("""You have to specify exactly one type of prompt. Either text or visual prompt.""" ) if text is not None: _snake_case : Dict = self.tokenizer(a_, return_tensors=a_, **a_ ) if visual_prompt is not None: _snake_case : List[Any] = self.image_processor(a_, return_tensors=a_, **a_ ) if images is not None: _snake_case : Tuple = self.image_processor(a_, return_tensors=a_, **a_ ) if visual_prompt is not None and images is not None: _snake_case : Any = { """pixel_values""": image_features.pixel_values, """conditional_pixel_values""": prompt_features.pixel_values, } return encoding elif text is not None and images is not None: _snake_case : Optional[int] = image_features.pixel_values return encoding elif text is not None: return encoding elif visual_prompt is not None: _snake_case : Optional[Any] = { """conditional_pixel_values""": prompt_features.pixel_values, } return encoding else: return BatchEncoding(data=dict(**a_ ), tensor_type=a_ ) def UpperCamelCase_ ( self: List[Any], *a_: Tuple, **a_: Optional[int] ): '''simple docstring''' return self.tokenizer.batch_decode(*a_, **a_ ) def UpperCamelCase_ ( self: Any, *a_: Union[str, Any], **a_: str ): '''simple docstring''' return self.tokenizer.decode(*a_, **a_ ) @property def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' warnings.warn( """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""", a_, ) return self.image_processor_class @property def UpperCamelCase_ ( self: List[Any] ): '''simple docstring''' warnings.warn( """`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""", a_, ) return self.image_processor
64
"""simple docstring""" import re import warnings from contextlib import contextmanager from ...processing_utils import ProcessorMixin class lowercase( __a ): '''simple docstring''' lowercase__ = ["image_processor", "tokenizer"] lowercase__ = "AutoImageProcessor" lowercase__ = "AutoTokenizer" def __init__( self: List[str], a_: List[str]=None, a_: Tuple=None, **a_: Tuple ): '''simple docstring''' _snake_case : str = None if "feature_extractor" in kwargs: warnings.warn( """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`""" """ instead.""", a_, ) _snake_case : str = kwargs.pop("""feature_extractor""" ) _snake_case : Union[str, Any] = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("""You need to specify an `image_processor`.""" ) if tokenizer is None: raise ValueError("""You need to specify a `tokenizer`.""" ) super().__init__(a_, a_ ) _snake_case : Dict = self.image_processor _snake_case : Any = False def __call__( self: Any, *a_: Any, **a_: Tuple ): '''simple docstring''' if self._in_target_context_manager: return self.current_processor(*a_, **a_ ) _snake_case : Dict = kwargs.pop("""images""", a_ ) _snake_case : Optional[Any] = kwargs.pop("""text""", a_ ) if len(a_ ) > 0: _snake_case : Optional[int] = args[0] _snake_case : Tuple = args[1:] if images is None and text is None: raise ValueError("""You need to specify either an `images` or `text` input to process.""" ) if images is not None: _snake_case : Tuple = self.image_processor(a_, *a_, **a_ ) if text is not None: _snake_case : Tuple = self.tokenizer(a_, **a_ ) if text is None: return inputs elif images is None: return encodings else: _snake_case : List[str] = encodings["""input_ids"""] return inputs def UpperCamelCase_ ( self: Optional[int], *a_: Tuple, **a_: List[str] ): '''simple docstring''' return self.tokenizer.batch_decode(*a_, **a_ ) def UpperCamelCase_ ( self: int, *a_: List[str], **a_: int ): '''simple docstring''' return self.tokenizer.decode(*a_, **a_ ) @contextmanager def UpperCamelCase_ ( self: Dict ): '''simple docstring''' warnings.warn( """`as_target_processor` is deprecated and will be removed in v5 of Transformers. 
You can process your """ """labels by using the argument `text` of the regular `__call__` method (either in the same call as """ """your images inputs, or in a separate call.""" ) _snake_case : Any = True _snake_case : Optional[int] = self.tokenizer yield _snake_case : int = self.image_processor _snake_case : Optional[int] = False def UpperCamelCase_ ( self: Dict, a_: Optional[Any], a_: str=False, a_: Optional[Any]=None ): '''simple docstring''' if added_vocab is None: _snake_case : Dict = self.tokenizer.get_added_vocab() _snake_case : str = {} while tokens: _snake_case : Union[str, Any] = re.search(r"""<s_(.*?)>""", a_, re.IGNORECASE ) if start_token is None: break _snake_case : List[Any] = start_token.group(1 ) _snake_case : str = re.search(rf"</s_{key}>", a_, re.IGNORECASE ) _snake_case : Dict = start_token.group() if end_token is None: _snake_case : List[Any] = tokens.replace(a_, """""" ) else: _snake_case : List[str] = end_token.group() _snake_case : str = re.escape(a_ ) _snake_case : str = re.escape(a_ ) _snake_case : Union[str, Any] = re.search(f"{start_token_escaped}(.*?){end_token_escaped}", a_, re.IGNORECASE ) if content is not None: _snake_case : int = content.group(1 ).strip() if r"<s_" in content and r"</s_" in content: # non-leaf node _snake_case : List[Any] = self.tokenajson(a_, is_inner_value=a_, added_vocab=a_ ) if value: if len(a_ ) == 1: _snake_case : List[str] = value[0] _snake_case : List[str] = value else: # leaf nodes _snake_case : Tuple = [] for leaf in content.split(r"""<sep/>""" ): _snake_case : Tuple = leaf.strip() if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>": _snake_case : int = leaf[1:-2] # for categorical special tokens output[key].append(a_ ) if len(output[key] ) == 1: _snake_case : int = output[key][0] _snake_case : Any = tokens[tokens.find(a_ ) + len(a_ ) :].strip() if tokens[:6] == r"<sep/>": # non-leaf nodes return [output] + self.tokenajson(tokens[6:], is_inner_value=a_, added_vocab=a_ ) if len(a_ ): return [output] if is_inner_value else output else: return [] if is_inner_value else {"text_sequence": tokens} @property def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' warnings.warn( """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""", a_, ) return self.image_processor_class @property def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' warnings.warn( """`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""", a_, ) return self.image_processor
64
1
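# Hedged sketch (illustrative, not one of the dataset rows): both processor
# rows above share the same deprecation shim, accepting the legacy
# `feature_extractor` kwarg with a warning and forwarding it to `image_processor`.
import warnings

class ProcessorSketch:
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, "
                "use `image_processor` instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        self.image_processor = image_processor
        self.tokenizer = tokenizer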
"""simple docstring""" import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import AutoImageProcessor, ViTImageProcessor from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test sys.path.append(str(Path(__file__).parent.parent / '''utils''')) from test_module.custom_image_processing import CustomImageProcessor # noqa E402 A_ = get_tests_dir('''fixtures''') class lowercase( unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' _snake_case : str = mock.Mock() _snake_case : List[Any] = 500 _snake_case : List[Any] = {} _snake_case : int = HTTPError _snake_case : Optional[int] = {} # Download this model to make sure it's in the cache. _snake_case : Any = ViTImageProcessor.from_pretrained("""hf-internal-testing/tiny-random-vit""" ) # Under the mock environment we get a 500 error when trying to reach the model. with mock.patch("""requests.Session.request""", return_value=a_ ) as mock_head: _snake_case : str = ViTImageProcessor.from_pretrained("""hf-internal-testing/tiny-random-vit""" ) # This check we did call the fake head request mock_head.assert_called() def UpperCamelCase_ ( self: Dict ): '''simple docstring''' _snake_case : List[Any] = ViTImageProcessor.from_pretrained( """https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json""" ) def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' with self.assertRaises(a_ ): # config is in subfolder, the following should not work without specifying the subfolder _snake_case : Optional[int] = AutoImageProcessor.from_pretrained("""hf-internal-testing/stable-diffusion-all-variants""" ) _snake_case : Union[str, Any] = AutoImageProcessor.from_pretrained( """hf-internal-testing/stable-diffusion-all-variants""", subfolder="""feature_extractor""" ) self.assertIsNotNone(a_ ) @is_staging_test class lowercase( unittest.TestCase ): '''simple docstring''' @classmethod def UpperCamelCase_ ( cls: List[Any] ): '''simple docstring''' _snake_case : int = TOKEN HfFolder.save_token(a_ ) @classmethod def UpperCamelCase_ ( cls: int ): '''simple docstring''' try: delete_repo(token=cls._token, repo_id="""test-image-processor""" ) except HTTPError: pass try: delete_repo(token=cls._token, repo_id="""valid_org/test-image-processor-org""" ) except HTTPError: pass try: delete_repo(token=cls._token, repo_id="""test-dynamic-image-processor""" ) except HTTPError: pass def UpperCamelCase_ ( self: int ): '''simple docstring''' _snake_case : List[Any] = ViTImageProcessor.from_pretrained(a_ ) image_processor.push_to_hub("""test-image-processor""", use_auth_token=self._token ) _snake_case : Optional[Any] = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor" ) for k, v in image_processor.__dict__.items(): self.assertEqual(a_, getattr(a_, a_ ) ) # Reset repo delete_repo(token=self._token, repo_id="""test-image-processor""" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained( a_, repo_id="""test-image-processor""", push_to_hub=a_, use_auth_token=self._token ) _snake_case : int = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor" ) for k, v in image_processor.__dict__.items(): self.assertEqual(a_, getattr(a_, a_ ) ) def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' _snake_case : Optional[Any] = 
ViTImageProcessor.from_pretrained(a_ ) image_processor.push_to_hub("""valid_org/test-image-processor""", use_auth_token=self._token ) _snake_case : List[str] = ViTImageProcessor.from_pretrained("""valid_org/test-image-processor""" ) for k, v in image_processor.__dict__.items(): self.assertEqual(a_, getattr(a_, a_ ) ) # Reset repo delete_repo(token=self._token, repo_id="""valid_org/test-image-processor""" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained( a_, repo_id="""valid_org/test-image-processor-org""", push_to_hub=a_, use_auth_token=self._token ) _snake_case : Any = ViTImageProcessor.from_pretrained("""valid_org/test-image-processor-org""" ) for k, v in image_processor.__dict__.items(): self.assertEqual(a_, getattr(a_, a_ ) ) def UpperCamelCase_ ( self: Any ): '''simple docstring''' CustomImageProcessor.register_for_auto_class() _snake_case : Dict = CustomImageProcessor.from_pretrained(a_ ) image_processor.push_to_hub("""test-dynamic-image-processor""", use_auth_token=self._token ) # This has added the proper auto_map field to the config self.assertDictEqual( image_processor.auto_map, {"""AutoImageProcessor""": """custom_image_processing.CustomImageProcessor"""}, ) _snake_case : Optional[int] = AutoImageProcessor.from_pretrained( f"{USER}/test-dynamic-image-processor", trust_remote_code=a_ ) # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module self.assertEqual(new_image_processor.__class__.__name__, """CustomImageProcessor""" )
64
"""simple docstring""" from __future__ import annotations def UpperCAmelCase__ (snake_case__ : list[float] ): """simple docstring""" _snake_case : int = 0.00 _snake_case : int = 0 for resistor in resistors: if resistor <= 0: _snake_case : Dict = F"Resistor at index {index} has a negative or zero value!" raise ValueError(snake_case__ ) first_sum += 1 / float(snake_case__ ) index += 1 return 1 / first_sum def UpperCAmelCase__ (snake_case__ : list[float] ): """simple docstring""" _snake_case : Union[str, Any] = 0.00 _snake_case : Any = 0 for resistor in resistors: sum_r += resistor if resistor < 0: _snake_case : Any = F"Resistor at index {index} has a negative value!" raise ValueError(snake_case__ ) index += 1 return sum_r if __name__ == "__main__": import doctest doctest.testmod()
64
1
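# Usage sketch (illustrative, not one of the dataset rows) for the resistor
# helpers in the row above: parallel combination is 1 / (1/R1 + ... + 1/Rn),
# series is R1 + ... + Rn.
resistors = [10.0, 20.0, 20.0]
r_parallel = 1 / sum(1 / r for r in resistors)  # 1 / (0.1 + 0.05 + 0.05) == 5.0 ohms
r_series = sum(resistors)                       # 50.0 ohms
print(r_parallel, r_series)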
"""simple docstring""" import itertools import string from collections.abc import Generator, Iterable def UpperCAmelCase__ (snake_case__ : Iterable[str] , snake_case__ : int ): """simple docstring""" _snake_case : Optional[Any] = iter(snake_case__ ) while True: _snake_case : List[str] = tuple(itertools.islice(snake_case__ , snake_case__ ) ) if not chunk: return yield chunk def UpperCAmelCase__ (snake_case__ : str ): """simple docstring""" _snake_case : Union[str, Any] = """""".join([c.upper() for c in dirty if c in string.ascii_letters] ) _snake_case : List[str] = """""" if len(snake_case__ ) < 2: return dirty for i in range(len(snake_case__ ) - 1 ): clean += dirty[i] if dirty[i] == dirty[i + 1]: clean += "X" clean += dirty[-1] if len(snake_case__ ) & 1: clean += "X" return clean def UpperCAmelCase__ (snake_case__ : str ): """simple docstring""" _snake_case : Dict = """ABCDEFGHIKLMNOPQRSTUVWXYZ""" # we're using a list instead of a '2d' array because it makes the math # for setting up the table and doing the actual encoding/decoding simpler _snake_case : List[Any] = [] # copy key chars into the table if they are in `alphabet` ignoring duplicates for char in key.upper(): if char not in table and char in alphabet: table.append(snake_case__ ) # fill the rest of the table in with the remaining alphabet chars for char in alphabet: if char not in table: table.append(snake_case__ ) return table def UpperCAmelCase__ (snake_case__ : str , snake_case__ : str ): """simple docstring""" _snake_case : Optional[int] = generate_table(snake_case__ ) _snake_case : Tuple = prepare_input(snake_case__ ) _snake_case : int = """""" # https://en.wikipedia.org/wiki/Playfair_cipher#Description for chara, chara in chunker(snake_case__ , 2 ): _snake_case , _snake_case : int = divmod(table.index(snake_case__ ) , 5 ) _snake_case , _snake_case : Dict = divmod(table.index(snake_case__ ) , 5 ) if rowa == rowa: ciphertext += table[rowa * 5 + (cola + 1) % 5] ciphertext += table[rowa * 5 + (cola + 1) % 5] elif cola == cola: ciphertext += table[((rowa + 1) % 5) * 5 + cola] ciphertext += table[((rowa + 1) % 5) * 5 + cola] else: # rectangle ciphertext += table[rowa * 5 + cola] ciphertext += table[rowa * 5 + cola] return ciphertext def UpperCAmelCase__ (snake_case__ : str , snake_case__ : str ): """simple docstring""" _snake_case : Union[str, Any] = generate_table(snake_case__ ) _snake_case : List[Any] = """""" # https://en.wikipedia.org/wiki/Playfair_cipher#Description for chara, chara in chunker(snake_case__ , 2 ): _snake_case , _snake_case : Optional[int] = divmod(table.index(snake_case__ ) , 5 ) _snake_case , _snake_case : Tuple = divmod(table.index(snake_case__ ) , 5 ) if rowa == rowa: plaintext += table[rowa * 5 + (cola - 1) % 5] plaintext += table[rowa * 5 + (cola - 1) % 5] elif cola == cola: plaintext += table[((rowa - 1) % 5) * 5 + cola] plaintext += table[((rowa - 1) % 5) * 5 + cola] else: # rectangle plaintext += table[rowa * 5 + cola] plaintext += table[rowa * 5 + cola] return plaintext
64
"""simple docstring""" import json import re from typing import TYPE_CHECKING, List, Optional, Tuple, Union import numpy as np from ...utils import is_tf_available, is_torch_available, logging if TYPE_CHECKING: if is_torch_available(): import torch if is_tf_available(): import tensorflow as tf from tokenizers import pre_tokenizers from ...tokenization_utils_base import BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from .tokenization_codegen import CodeGenTokenizer A_ = logging.get_logger(__name__) A_ = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''} A_ = { '''vocab_file''': { '''Salesforce/codegen-350M-mono''': '''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json''', }, '''merges_file''': { '''Salesforce/codegen-350M-mono''': '''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt''', }, '''tokenizer_file''': { '''Salesforce/codegen-350M-mono''': ( '''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json''' ), }, } A_ = { '''Salesforce/codegen-350M-mono''': 20_48, } class lowercase( __a ): '''simple docstring''' lowercase__ = VOCAB_FILES_NAMES lowercase__ = PRETRAINED_VOCAB_FILES_MAP lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase__ = ["input_ids", "attention_mask"] lowercase__ = CodeGenTokenizer def __init__( self: Union[str, Any], a_: List[Any]=None, a_: str=None, a_: str=None, a_: Dict="<|endoftext|>", a_: Tuple="<|endoftext|>", a_: str="<|endoftext|>", a_: List[Any]=False, **a_: List[str], ): '''simple docstring''' super().__init__( a_, a_, tokenizer_file=a_, unk_token=a_, bos_token=a_, eos_token=a_, add_prefix_space=a_, **a_, ) if kwargs.pop("""add_bos_token""", a_ ): _snake_case : str = kwargs.pop("""name_or_path""", """""" ) raise ValueError( """Currenty GPT2's fast tokenizer does NOT support adding a BOS token.""" """Instead you should use GPT2's slow tokenizer class `CodeGenTokenizer` as follows: \n""" f"`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n" f"`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n" """This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005.""" """ so that the fast tokenizer works correctly.""" ) _snake_case : Tuple = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("""add_prefix_space""", a_ ) != add_prefix_space: _snake_case : Dict = getattr(a_, pre_tok_state.pop("""type""" ) ) _snake_case : Dict = add_prefix_space _snake_case : str = pre_tok_class(**a_ ) _snake_case : List[Any] = add_prefix_space def UpperCamelCase_ ( self: Any, *a_: Any, **a_: int ): '''simple docstring''' _snake_case : Optional[int] = kwargs.get("""is_split_into_words""", a_ ) assert self.add_prefix_space or not is_split_into_words, ( f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True " "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*a_, **a_ ) def UpperCamelCase_ ( self: Optional[Any], *a_: Any, **a_: List[str] ): '''simple docstring''' _snake_case : Dict = kwargs.get("""is_split_into_words""", a_ ) assert self.add_prefix_space or not is_split_into_words, ( f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True " "to use it with pretokenized inputs." 
) return super()._encode_plus(*a_, **a_ ) def UpperCamelCase_ ( self: Optional[int], a_: str, a_: Optional[str] = None ): '''simple docstring''' _snake_case : List[Any] = self._tokenizer.model.save(a_, name=a_ ) return tuple(a_ ) def UpperCamelCase_ ( self: str, a_: Union[int, List[int], "np.ndarray", "torch.Tensor", "tf.Tensor"], a_: bool = False, a_: bool = None, a_: Optional[List[str]] = None, **a_: List[str], ): '''simple docstring''' _snake_case : Any = super().decode( token_ids=a_, skip_special_tokens=a_, clean_up_tokenization_spaces=a_, **a_, ) if truncate_before_pattern is not None and len(a_ ) > 0: _snake_case : List[str] = self.truncate(a_, a_ ) return decoded_text def UpperCamelCase_ ( self: Dict, a_: Tuple, a_: Optional[Any] ): '''simple docstring''' def find_re(a_: Dict, a_: str, a_: Union[str, Any] ): _snake_case : Any = pattern.search(a_, a_ ) return m.start() if m else -1 _snake_case : Tuple = [re.compile(a_, re.MULTILINE ) for pattern in truncate_before_pattern] _snake_case : List[Any] = list(re.finditer("""^print""", a_, re.MULTILINE ) ) if len(a_ ) > 1: _snake_case : int = completion[: prints[1].start()] _snake_case : List[str] = list(re.finditer("""^def""", a_, re.MULTILINE ) ) if len(a_ ) > 1: _snake_case : List[Any] = completion[: defs[1].start()] _snake_case : int = 0 _snake_case : List[Any] = [ pos for pos in [find_re(a_, a_, a_ ) for terminal in terminals] if pos != -1 ] if len(a_ ) > 0: return completion[: min(a_ )] else: return completion
64
1
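# Usage sketch (illustrative, not one of the dataset rows) for the Playfair
# helpers in the row above; it assumes the reconstructed names generate_table /
# encode / decode are in scope.
key = "monarchy"
table = generate_table(key)              # 25-cell key square (J is merged into I)
assert len(table) == 25
ciphertext = encode("instruments", key)  # odd-length input is padded with "X"
assert decode(ciphertext, key).startswith("INSTRUMENTS")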
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) A_ = { '''configuration_longformer''': [ '''LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LongformerConfig''', '''LongformerOnnxConfig''', ], '''tokenization_longformer''': ['''LongformerTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ = ['''LongformerTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ = [ '''LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''LongformerForMaskedLM''', '''LongformerForMultipleChoice''', '''LongformerForQuestionAnswering''', '''LongformerForSequenceClassification''', '''LongformerForTokenClassification''', '''LongformerModel''', '''LongformerPreTrainedModel''', '''LongformerSelfAttention''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ = [ '''TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFLongformerForMaskedLM''', '''TFLongformerForMultipleChoice''', '''TFLongformerForQuestionAnswering''', '''TFLongformerForSequenceClassification''', '''TFLongformerForTokenClassification''', '''TFLongformerModel''', '''TFLongformerPreTrainedModel''', '''TFLongformerSelfAttention''', ] if TYPE_CHECKING: from .configuration_longformer import ( LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, LongformerConfig, LongformerOnnxConfig, ) from .tokenization_longformer import LongformerTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_longformer_fast import LongformerTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_longformer import ( LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, LongformerForMaskedLM, LongformerForMultipleChoice, LongformerForQuestionAnswering, LongformerForSequenceClassification, LongformerForTokenClassification, LongformerModel, LongformerPreTrainedModel, LongformerSelfAttention, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_longformer import ( TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TFLongformerForMaskedLM, TFLongformerForMultipleChoice, TFLongformerForQuestionAnswering, TFLongformerForSequenceClassification, TFLongformerForTokenClassification, TFLongformerModel, TFLongformerPreTrainedModel, TFLongformerSelfAttention, ) else: import sys A_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
64
"""simple docstring""" import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor from transformers.utils import logging logging.set_verbosity_info() A_ = logging.get_logger(__name__) def UpperCAmelCase__ (snake_case__ : str ): """simple docstring""" _snake_case : List[Any] = YolosConfig() # size of the architecture if "yolos_ti" in yolos_name: _snake_case : Tuple = 1_92 _snake_case : Any = 7_68 _snake_case : Any = 12 _snake_case : List[Any] = 3 _snake_case : int = [8_00, 13_33] _snake_case : Tuple = False elif yolos_name == "yolos_s_dWr": _snake_case : Tuple = 3_30 _snake_case : List[str] = 14 _snake_case : List[str] = 6 _snake_case : Union[str, Any] = 13_20 elif "yolos_s" in yolos_name: _snake_case : Union[str, Any] = 3_84 _snake_case : List[str] = 15_36 _snake_case : Any = 12 _snake_case : Optional[int] = 6 elif "yolos_b" in yolos_name: _snake_case : Dict = [8_00, 13_44] _snake_case : str = 91 _snake_case : Optional[Any] = """huggingface/label-files""" _snake_case : str = """coco-detection-id2label.json""" _snake_case : str = json.load(open(hf_hub_download(snake_case__ , snake_case__ , repo_type="""dataset""" ) , """r""" ) ) _snake_case : Union[str, Any] = {int(snake_case__ ): v for k, v in idalabel.items()} _snake_case : List[str] = idalabel _snake_case : List[str] = {v: k for k, v in idalabel.items()} return config def UpperCAmelCase__ (snake_case__ : dict , snake_case__ : YolosConfig , snake_case__ : bool = False ): """simple docstring""" for i in range(config.num_hidden_layers ): # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) _snake_case : int = state_dict.pop(F"blocks.{i}.attn.qkv.weight" ) _snake_case : Union[str, Any] = state_dict.pop(F"blocks.{i}.attn.qkv.bias" ) # next, add query, keys and values (in that order) to the state dict _snake_case : Any = in_proj_weight[: config.hidden_size, :] _snake_case : Optional[Any] = in_proj_bias[: config.hidden_size] _snake_case : Optional[int] = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] _snake_case : int = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] _snake_case : Tuple = in_proj_weight[-config.hidden_size :, :] _snake_case : List[Any] = in_proj_bias[-config.hidden_size :] def UpperCAmelCase__ (snake_case__ : str ): """simple docstring""" if "backbone" in name: _snake_case : str = name.replace("""backbone""" , """vit""" ) if "cls_token" in name: _snake_case : Union[str, Any] = name.replace("""cls_token""" , """embeddings.cls_token""" ) if "det_token" in name: _snake_case : str = name.replace("""det_token""" , """embeddings.detection_tokens""" ) if "mid_pos_embed" in name: _snake_case : str = name.replace("""mid_pos_embed""" , """encoder.mid_position_embeddings""" ) if "pos_embed" in name: _snake_case : Tuple = name.replace("""pos_embed""" , """embeddings.position_embeddings""" ) if "patch_embed.proj" in name: _snake_case : str = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" ) if "blocks" in name: _snake_case : str = name.replace("""blocks""" , """encoder.layer""" ) if "attn.proj" in name: _snake_case : Any = name.replace("""attn.proj""" , """attention.output.dense""" ) if "attn" in name: _snake_case : str = name.replace("""attn""" , """attention.self""" ) if "norm1" in name: _snake_case : List[str] = name.replace("""norm1""" , """layernorm_before""" ) if 
"norm2" in name: _snake_case : str = name.replace("""norm2""" , """layernorm_after""" ) if "mlp.fc1" in name: _snake_case : List[str] = name.replace("""mlp.fc1""" , """intermediate.dense""" ) if "mlp.fc2" in name: _snake_case : int = name.replace("""mlp.fc2""" , """output.dense""" ) if "class_embed" in name: _snake_case : Union[str, Any] = name.replace("""class_embed""" , """class_labels_classifier""" ) if "bbox_embed" in name: _snake_case : str = name.replace("""bbox_embed""" , """bbox_predictor""" ) if "vit.norm" in name: _snake_case : Union[str, Any] = name.replace("""vit.norm""" , """vit.layernorm""" ) return name def UpperCAmelCase__ (snake_case__ : dict , snake_case__ : YolosForObjectDetection ): """simple docstring""" for key in orig_state_dict.copy().keys(): _snake_case : List[str] = orig_state_dict.pop(snake_case__ ) if "qkv" in key: _snake_case : Optional[Any] = key.split(""".""" ) _snake_case : Optional[Any] = int(key_split[2] ) _snake_case : Optional[int] = model.vit.encoder.layer[layer_num].attention.attention.all_head_size if "weight" in key: _snake_case : str = val[:dim, :] _snake_case : Optional[Any] = val[ dim : dim * 2, : ] _snake_case : Optional[Any] = val[-dim:, :] else: _snake_case : Dict = val[:dim] _snake_case : Any = val[dim : dim * 2] _snake_case : Dict = val[-dim:] else: _snake_case : Tuple = val return orig_state_dict def UpperCAmelCase__ (): """simple docstring""" _snake_case : str = """http://images.cocodataset.org/val2017/000000039769.jpg""" _snake_case : Union[str, Any] = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw ) return im @torch.no_grad() def UpperCAmelCase__ (snake_case__ : str , snake_case__ : str , snake_case__ : str , snake_case__ : bool = False ): """simple docstring""" _snake_case : Optional[Any] = get_yolos_config(snake_case__ ) # load original state_dict _snake_case : Optional[int] = torch.load(snake_case__ , map_location="""cpu""" )["""model"""] # load 🤗 model _snake_case : Optional[Any] = YolosForObjectDetection(snake_case__ ) model.eval() _snake_case : Optional[Any] = convert_state_dict(snake_case__ , snake_case__ ) model.load_state_dict(snake_case__ ) # Check outputs on an image, prepared by YolosImageProcessor _snake_case : List[str] = 8_00 if yolos_name != """yolos_ti""" else 5_12 _snake_case : Optional[int] = YolosImageProcessor(format="""coco_detection""" , size=snake_case__ ) _snake_case : Optional[Any] = image_processor(images=prepare_img() , return_tensors="""pt""" ) _snake_case : Optional[Any] = model(**snake_case__ ) _snake_case , _snake_case : Optional[int] = outputs.logits, outputs.pred_boxes _snake_case , _snake_case : Dict = None, None if yolos_name == "yolos_ti": _snake_case : Optional[Any] = torch.tensor( [[-39.50_22, -11.98_20, -17.68_88], [-29.95_74, -9.97_69, -17.76_91], [-42.32_81, -20.72_00, -30.62_94]] ) _snake_case : Tuple = torch.tensor( [[0.40_21, 0.08_36, 0.79_79], [0.01_84, 0.26_09, 0.03_64], [0.17_81, 0.20_04, 0.20_95]] ) elif yolos_name == "yolos_s_200_pre": _snake_case : List[str] = torch.tensor( [[-24.02_48, -10.30_24, -14.82_90], [-42.03_92, -16.82_00, -27.43_34], [-27.27_43, -11.81_54, -18.71_48]] ) _snake_case : List[str] = torch.tensor( [[0.25_59, 0.54_55, 0.47_06], [0.29_89, 0.72_79, 0.18_75], [0.77_32, 0.40_17, 0.44_62]] ) elif yolos_name == "yolos_s_300_pre": _snake_case : Dict = torch.tensor( [[-36.22_20, -14.43_85, -23.54_57], [-35.69_70, -14.75_83, -21.39_35], [-31.59_39, -13.60_42, -16.80_49]] ) _snake_case : Union[str, Any] = torch.tensor( [[0.76_14, 0.23_16, 0.47_28], [0.71_68, 
0.44_95, 0.38_55], [0.49_96, 0.14_66, 0.99_96]] ) elif yolos_name == "yolos_s_dWr": _snake_case : Tuple = torch.tensor( [[-42.86_68, -24.10_49, -41.16_90], [-34.74_56, -14.12_74, -24.91_94], [-33.78_98, -12.19_46, -25.64_95]] ) _snake_case : Optional[Any] = torch.tensor( [[0.55_87, 0.27_73, 0.06_05], [0.50_04, 0.30_14, 0.99_94], [0.49_99, 0.15_48, 0.99_94]] ) elif yolos_name == "yolos_base": _snake_case : int = torch.tensor( [[-40.60_64, -24.30_84, -32.64_47], [-55.19_90, -30.77_19, -35.58_77], [-51.43_11, -33.35_07, -35.64_62]] ) _snake_case : Optional[int] = torch.tensor( [[0.55_55, 0.27_94, 0.06_55], [0.90_49, 0.26_64, 0.18_94], [0.91_83, 0.19_84, 0.16_35]] ) else: raise ValueError(F"Unknown yolos_name: {yolos_name}" ) assert torch.allclose(logits[0, :3, :3] , snake_case__ , atol=1e-4 ) assert torch.allclose(pred_boxes[0, :3, :3] , snake_case__ , atol=1e-4 ) Path(snake_case__ ).mkdir(exist_ok=snake_case__ ) print(F"Saving model {yolos_name} to {pytorch_dump_folder_path}" ) model.save_pretrained(snake_case__ ) print(F"Saving image processor to {pytorch_dump_folder_path}" ) image_processor.save_pretrained(snake_case__ ) if push_to_hub: _snake_case : Dict = { """yolos_ti""": """yolos-tiny""", """yolos_s_200_pre""": """yolos-small""", """yolos_s_300_pre""": """yolos-small-300""", """yolos_s_dWr""": """yolos-small-dwr""", """yolos_base""": """yolos-base""", } print("""Pushing to the hub...""" ) _snake_case : str = model_mapping[yolos_name] image_processor.push_to_hub(snake_case__ , organization="""hustvl""" ) model.push_to_hub(snake_case__ , organization="""hustvl""" ) if __name__ == "__main__": A_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--yolos_name''', default='''yolos_s_200_pre''', type=str, help=( '''Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\',''' ''' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.''' ), ) parser.add_argument( '''--checkpoint_path''', default=None, type=str, help='''Path to the original state dict (.pth file).''' ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.''' ) A_ = parser.parse_args() convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
64
1
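# Hedged sketch (illustrative, not one of the dataset rows): the YOLOS
# conversion row above splits a fused qkv projection of shape
# (3 * hidden, hidden) into separate query/key/value slices.
import torch

hidden_size = 4
in_proj_weight = torch.randn(3 * hidden_size, hidden_size)
q = in_proj_weight[:hidden_size, :]
k = in_proj_weight[hidden_size : hidden_size * 2, :]
v = in_proj_weight[-hidden_size:, :]
assert torch.equal(torch.cat([q, k, v]), in_proj_weight)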
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available A_ = { '''configuration_bridgetower''': [ '''BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BridgeTowerConfig''', '''BridgeTowerTextConfig''', '''BridgeTowerVisionConfig''', ], '''processing_bridgetower''': ['''BridgeTowerProcessor'''], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ = ['''BridgeTowerImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ = [ '''BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''BridgeTowerForContrastiveLearning''', '''BridgeTowerForImageAndTextRetrieval''', '''BridgeTowerForMaskedLM''', '''BridgeTowerModel''', '''BridgeTowerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_bridgetower import ( BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP, BridgeTowerConfig, BridgeTowerTextConfig, BridgeTowerVisionConfig, ) from .processing_bridgetower import BridgeTowerProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_bridgetower import BridgeTowerImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bridgetower import ( BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST, BridgeTowerForContrastiveLearning, BridgeTowerForImageAndTextRetrieval, BridgeTowerForMaskedLM, BridgeTowerModel, BridgeTowerPreTrainedModel, ) else: import sys A_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
64
"""simple docstring""" import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD torch.set_grad_enabled(False) def UpperCAmelCase__ (snake_case__ : str , snake_case__ : List[str]=False ): """simple docstring""" _snake_case : Optional[Any] = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((F"module.blocks.{i}.norm1.weight", F"vit.encoder.layer.{i}.layernorm_before.weight") ) rename_keys.append((F"module.blocks.{i}.norm1.bias", F"vit.encoder.layer.{i}.layernorm_before.bias") ) rename_keys.append( (F"module.blocks.{i}.attn.proj.weight", F"vit.encoder.layer.{i}.attention.output.dense.weight") ) rename_keys.append((F"module.blocks.{i}.attn.proj.bias", F"vit.encoder.layer.{i}.attention.output.dense.bias") ) rename_keys.append((F"module.blocks.{i}.norm2.weight", F"vit.encoder.layer.{i}.layernorm_after.weight") ) rename_keys.append((F"module.blocks.{i}.norm2.bias", F"vit.encoder.layer.{i}.layernorm_after.bias") ) rename_keys.append((F"module.blocks.{i}.mlp.fc1.weight", F"vit.encoder.layer.{i}.intermediate.dense.weight") ) rename_keys.append((F"module.blocks.{i}.mlp.fc1.bias", F"vit.encoder.layer.{i}.intermediate.dense.bias") ) rename_keys.append((F"module.blocks.{i}.mlp.fc2.weight", F"vit.encoder.layer.{i}.output.dense.weight") ) rename_keys.append((F"module.blocks.{i}.mlp.fc2.bias", F"vit.encoder.layer.{i}.output.dense.bias") ) # projection layer + position embeddings rename_keys.extend( [ ("""module.cls_token""", """vit.embeddings.cls_token"""), ("""module.patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""), ("""module.patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""), ("""module.pos_embed""", """vit.embeddings.position_embeddings"""), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ("""module.norm.weight""", """layernorm.weight"""), ("""module.norm.bias""", """layernorm.bias"""), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" _snake_case : Any = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ("""norm.weight""", """vit.layernorm.weight"""), ("""norm.bias""", """vit.layernorm.bias"""), ("""head.weight""", """classifier.weight"""), ("""head.bias""", """classifier.bias"""), ] ) return rename_keys def UpperCAmelCase__ (snake_case__ : Dict , snake_case__ : Dict , snake_case__ : List[str]=False ): """simple docstring""" for i in range(config.num_hidden_layers ): if base_model: _snake_case : List[Any] = """""" else: _snake_case : List[Any] = """vit.""" # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) _snake_case : Optional[Any] = state_dict.pop(F"module.blocks.{i}.attn.qkv.weight" ) _snake_case : Optional[Any] = state_dict.pop(F"module.blocks.{i}.attn.qkv.bias" ) # next, add query, keys and values (in that order) to the state dict _snake_case : Optional[Any] = in_proj_weight[ : config.hidden_size, : ] _snake_case : Union[str, Any] = in_proj_bias[: config.hidden_size] _snake_case : Union[str, Any] = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] _snake_case : Optional[Any] = in_proj_bias[ config.hidden_size 
: config.hidden_size * 2 ] _snake_case : Union[str, Any] = in_proj_weight[ -config.hidden_size :, : ] _snake_case : List[str] = in_proj_bias[-config.hidden_size :] def UpperCAmelCase__ (snake_case__ : str ): """simple docstring""" _snake_case : Tuple = ["""head.weight""", """head.bias"""] for k in ignore_keys: state_dict.pop(snake_case__ , snake_case__ ) def UpperCAmelCase__ (snake_case__ : int ): """simple docstring""" _snake_case : List[str] = [ """module.fc.fc1.weight""", """module.fc.fc1.bias""", """module.fc.bn1.weight""", """module.fc.bn1.bias""", """module.fc.bn1.running_mean""", """module.fc.bn1.running_var""", """module.fc.bn1.num_batches_tracked""", """module.fc.fc2.weight""", """module.fc.fc2.bias""", """module.fc.bn2.weight""", """module.fc.bn2.bias""", """module.fc.bn2.running_mean""", """module.fc.bn2.running_var""", """module.fc.bn2.num_batches_tracked""", """module.fc.fc3.weight""", """module.fc.fc3.bias""", ] for k in ignore_keys: state_dict.pop(snake_case__ , snake_case__ ) def UpperCAmelCase__ (snake_case__ : List[Any] , snake_case__ : Tuple , snake_case__ : int ): """simple docstring""" _snake_case : Optional[Any] = dct.pop(snake_case__ ) _snake_case : Union[str, Any] = val def UpperCAmelCase__ (snake_case__ : List[Any] , snake_case__ : str ): """simple docstring""" _snake_case : str = ViTMSNConfig() _snake_case : Any = 10_00 _snake_case : Tuple = """datasets/huggingface/label-files""" _snake_case : Dict = """imagenet-1k-id2label.json""" _snake_case : int = json.load(open(hf_hub_download(snake_case__ , snake_case__ ) , """r""" ) ) _snake_case : Any = {int(snake_case__ ): v for k, v in idalabel.items()} _snake_case : List[Any] = idalabel _snake_case : str = {v: k for k, v in idalabel.items()} if "s16" in checkpoint_url: _snake_case : Tuple = 3_84 _snake_case : Dict = 15_36 _snake_case : Tuple = 6 elif "l16" in checkpoint_url: _snake_case : Any = 10_24 _snake_case : int = 40_96 _snake_case : str = 24 _snake_case : Optional[int] = 16 _snake_case : List[Any] = 0.1 elif "b4" in checkpoint_url: _snake_case : Tuple = 4 elif "l7" in checkpoint_url: _snake_case : int = 7 _snake_case : Dict = 10_24 _snake_case : Optional[Any] = 40_96 _snake_case : Any = 24 _snake_case : Union[str, Any] = 16 _snake_case : Optional[int] = 0.1 _snake_case : int = ViTMSNModel(snake_case__ ) _snake_case : Optional[int] = torch.hub.load_state_dict_from_url(snake_case__ , map_location="""cpu""" )["""target_encoder"""] _snake_case : List[str] = ViTImageProcessor(size=config.image_size ) remove_projection_head(snake_case__ ) _snake_case : List[str] = create_rename_keys(snake_case__ , base_model=snake_case__ ) for src, dest in rename_keys: rename_key(snake_case__ , snake_case__ , snake_case__ ) read_in_q_k_v(snake_case__ , snake_case__ , base_model=snake_case__ ) model.load_state_dict(snake_case__ ) model.eval() _snake_case : Union[str, Any] = """http://images.cocodataset.org/val2017/000000039769.jpg""" _snake_case : Tuple = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw ) _snake_case : str = ViTImageProcessor( size=config.image_size , image_mean=snake_case__ , image_std=snake_case__ ) _snake_case : Any = image_processor(images=snake_case__ , return_tensors="""pt""" ) # forward pass torch.manual_seed(2 ) _snake_case : int = model(**snake_case__ ) _snake_case : List[Any] = outputs.last_hidden_state # The following Colab Notebook was used to generate these outputs: # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb if "s16" in 
checkpoint_url: _snake_case : Optional[Any] = torch.tensor([[-1.09_15, -1.48_76, -1.18_09]] ) elif "b16" in checkpoint_url: _snake_case : str = torch.tensor([[14.28_89, -18.90_45, 11.72_81]] ) elif "l16" in checkpoint_url: _snake_case : Optional[int] = torch.tensor([[41.50_28, -22.86_81, 45.64_75]] ) elif "b4" in checkpoint_url: _snake_case : List[Any] = torch.tensor([[-4.38_68, 5.29_32, -0.41_37]] ) else: _snake_case : Optional[int] = torch.tensor([[-0.17_92, -0.64_65, 2.42_63]] ) # verify logits assert torch.allclose(last_hidden_state[:, 0, :3] , snake_case__ , atol=1e-4 ) print(F"Saving model to {pytorch_dump_folder_path}" ) model.save_pretrained(snake_case__ ) print(F"Saving image processor to {pytorch_dump_folder_path}" ) image_processor.save_pretrained(snake_case__ ) if __name__ == "__main__": A_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--checkpoint_url''', default='''https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar''', type=str, help='''URL of the checkpoint you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) A_ = parser.parse_args() convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
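# Usage sketch for the conversion script above (the file name is hypothetical;
# invoke whichever path the script lives at). The checkpoint URL shown is the
# argparse default declared above:
#
#   python convert_vit_msn_to_pytorch.py \
#       --checkpoint_url https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar \
#       --pytorch_dump_folder_path ./vit-msn-small
#
# The dumped folder can then be reloaded for a quick smoke test:
#
#   from transformers import ViTMSNModel
#   model = ViTMSNModel.from_pretrained("./vit-msn-small")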
"""simple docstring""" from __future__ import annotations from decimal import Decimal from math import * # noqa: F403 from sympy import diff def UpperCAmelCase__ (snake_case__ : str , snake_case__ : float | Decimal , snake_case__ : float = 10**-10 ): """simple docstring""" _snake_case : Optional[Any] = a while True: _snake_case : Optional[Any] = Decimal(snake_case__ ) - ( Decimal(eval(snake_case__ ) ) / Decimal(eval(str(diff(snake_case__ ) ) ) ) # noqa: S307 ) # This number dictates the accuracy of the answer if abs(eval(snake_case__ ) ) < precision: # noqa: S307 return float(snake_case__ ) # Let's Execute if __name__ == "__main__": # Find root of trigonometric function # Find value of pi print(F'''The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}''') # Find root of polynomial print(F'''The root of x**2 - 5*x + 2 = 0 is {newton_raphson('x**2 - 5*x + 2', 0.4)}''') # Find Square Root of 5 print(F'''The root of log(x) - 1 = 0 is {newton_raphson('log(x) - 1', 2)}''') # Exponential Roots print(F'''The root of exp(x) - 1 = 0 is {newton_raphson('exp(x) - 1', 0)}''')
"""simple docstring""" from __future__ import annotations from collections.abc import Sequence from typing import Literal def UpperCAmelCase__ (snake_case__ : str , snake_case__ : str ): """simple docstring""" _snake_case : Optional[Any] = list(snake_case__ ) _snake_case : List[Any] = list(snake_case__ ) _snake_case : List[Any] = 0 for i in range(len(snake_case__ ) ): if lista[i] != lista[i]: count += 1 _snake_case : Any = """_""" if count > 1: return False else: return "".join(snake_case__ ) def UpperCAmelCase__ (snake_case__ : list[str] ): """simple docstring""" _snake_case : int = [] while True: _snake_case : Union[str, Any] = ["""$"""] * len(snake_case__ ) _snake_case : int = [] for i in range(len(snake_case__ ) ): for j in range(i + 1 , len(snake_case__ ) ): _snake_case : List[Any] = compare_string(binary[i] , binary[j] ) if k is False: _snake_case : Dict = """*""" _snake_case : List[Any] = """*""" temp.append("""X""" ) for i in range(len(snake_case__ ) ): if checka[i] == "$": pi.append(binary[i] ) if len(snake_case__ ) == 0: return pi _snake_case : Optional[int] = list(set(snake_case__ ) ) def UpperCAmelCase__ (snake_case__ : int , snake_case__ : Sequence[float] ): """simple docstring""" _snake_case : Optional[int] = [] for minterm in minterms: _snake_case : Any = """""" for _ in range(snake_case__ ): _snake_case : Optional[Any] = str(minterm % 2 ) + string minterm //= 2 temp.append(snake_case__ ) return temp def UpperCAmelCase__ (snake_case__ : str , snake_case__ : str , snake_case__ : int ): """simple docstring""" _snake_case : Dict = list(snake_case__ ) _snake_case : List[str] = list(snake_case__ ) _snake_case : Tuple = 0 for i in range(len(snake_case__ ) ): if lista[i] != lista[i]: count_n += 1 return count_n == count def UpperCAmelCase__ (snake_case__ : list[list[int]] , snake_case__ : list[str] ): """simple docstring""" _snake_case : Any = [] _snake_case : Union[str, Any] = [0] * len(snake_case__ ) for i in range(len(chart[0] ) ): _snake_case : Tuple = 0 _snake_case : str = -1 for j in range(len(snake_case__ ) ): if chart[j][i] == 1: count += 1 _snake_case : Union[str, Any] = j if count == 1: _snake_case : Union[str, Any] = 1 for i in range(len(snake_case__ ) ): if select[i] == 1: for j in range(len(chart[0] ) ): if chart[i][j] == 1: for k in range(len(snake_case__ ) ): _snake_case : List[Any] = 0 temp.append(prime_implicants[i] ) while True: _snake_case : Optional[int] = 0 _snake_case : str = -1 _snake_case : Any = 0 for i in range(len(snake_case__ ) ): _snake_case : Union[str, Any] = chart[i].count(1 ) if count_n > max_n: _snake_case : Dict = count_n _snake_case : Dict = i if max_n == 0: return temp temp.append(prime_implicants[rem] ) for i in range(len(chart[0] ) ): if chart[rem][i] == 1: for j in range(len(snake_case__ ) ): _snake_case : Optional[Any] = 0 def UpperCAmelCase__ (snake_case__ : list[str] , snake_case__ : list[str] ): """simple docstring""" _snake_case : int = [[0 for x in range(len(snake_case__ ) )] for x in range(len(snake_case__ ) )] for i in range(len(snake_case__ ) ): _snake_case : Any = prime_implicants[i].count("""_""" ) for j in range(len(snake_case__ ) ): if is_for_table(prime_implicants[i] , binary[j] , snake_case__ ): _snake_case : Tuple = 1 return chart def UpperCAmelCase__ (): """simple docstring""" _snake_case : int = int(input("""Enter the no. 
of variables\n""" ) ) _snake_case : List[str] = [ float(snake_case__ ) for x in input( """Enter the decimal representation of Minterms 'Spaces Separated'\n""" ).split() ] _snake_case : List[str] = decimal_to_binary(snake_case__ , snake_case__ ) _snake_case : str = check(snake_case__ ) print("""Prime Implicants are:""" ) print(snake_case__ ) _snake_case : int = prime_implicant_chart(snake_case__ , snake_case__ ) _snake_case : str = selection(snake_case__ , snake_case__ ) print("""Essential Prime Implicants are:""" ) print(snake_case__ ) if __name__ == "__main__": import doctest doctest.testmod() main()
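# Worked example (a sketch using the functions above): for 3 variables with
# minterms {1, 4, 6}, decimal_to_binary() yields ['001', '100', '110']; merging
# gives the prime implicants ['001', '1_0'], both of which turn out essential.
def _demo_quine_mccluskey() -> None:
    binary = decimal_to_binary(3, [1, 4, 6])
    primes = check(binary)
    chart = prime_implicant_chart(primes, binary)
    print(primes, selection(chart, primes))  # ['001', '1_0'] ['001', '1_0']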
"""simple docstring""" import re import jax.numpy as jnp from flax.traverse_util import flatten_dict, unflatten_dict from jax.random import PRNGKey from ..utils import logging A_ = logging.get_logger(__name__) def UpperCAmelCase__ (snake_case__ : List[Any] ): """simple docstring""" _snake_case : Optional[Any] = R"""\w+[.]\d+""" _snake_case : Union[str, Any] = re.findall(snake_case__ , snake_case__ ) for pat in pats: _snake_case : str = key.replace(snake_case__ , """_""".join(pat.split(""".""" ) ) ) return key def UpperCAmelCase__ (snake_case__ : str , snake_case__ : Dict , snake_case__ : List[Any] ): """simple docstring""" _snake_case : Optional[int] = pt_tuple_key[:-1] + ("""scale""",) if ( any("""norm""" in str_ for str_ in pt_tuple_key ) and (pt_tuple_key[-1] == "bias") and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict) and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict) ): _snake_case : List[str] = pt_tuple_key[:-1] + ("""scale""",) return renamed_pt_tuple_key, pt_tensor elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict: _snake_case : Tuple = pt_tuple_key[:-1] + ("""scale""",) return renamed_pt_tuple_key, pt_tensor # embedding if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict: _snake_case : Tuple = pt_tuple_key[:-1] + ("""embedding""",) return renamed_pt_tuple_key, pt_tensor # conv layer _snake_case : Tuple = pt_tuple_key[:-1] + ("""kernel""",) if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4: _snake_case : Dict = pt_tensor.transpose(2 , 3 , 1 , 0 ) return renamed_pt_tuple_key, pt_tensor # linear layer _snake_case : Any = pt_tuple_key[:-1] + ("""kernel""",) if pt_tuple_key[-1] == "weight": _snake_case : Optional[Any] = pt_tensor.T return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm weight _snake_case : Dict = pt_tuple_key[:-1] + ("""weight""",) if pt_tuple_key[-1] == "gamma": return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm bias _snake_case : Tuple = pt_tuple_key[:-1] + ("""bias""",) if pt_tuple_key[-1] == "beta": return renamed_pt_tuple_key, pt_tensor return pt_tuple_key, pt_tensor def UpperCAmelCase__ (snake_case__ : Union[str, Any] , snake_case__ : Tuple , snake_case__ : Union[str, Any]=42 ): """simple docstring""" _snake_case : Dict = {k: v.numpy() for k, v in pt_state_dict.items()} # Step 2: Since the model is stateless, get random Flax params _snake_case : List[str] = flax_model.init_weights(PRNGKey(snake_case__ ) ) _snake_case : int = flatten_dict(snake_case__ ) _snake_case : List[str] = {} # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): _snake_case : List[str] = rename_key(snake_case__ ) _snake_case : List[Any] = tuple(renamed_pt_key.split(""".""" ) ) # Correctly rename weight parameters _snake_case , _snake_case : int = rename_key_and_reshape_tensor(snake_case__ , snake_case__ , snake_case__ ) if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( F"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape " F"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}." ) # also add unexpected weight so that warning is thrown _snake_case : Optional[Any] = jnp.asarray(snake_case__ ) return unflatten_dict(snake_case__ )
"""simple docstring""" def UpperCAmelCase__ (snake_case__ : Union[str, Any] ): """simple docstring""" stooge(snake_case__ , 0 , len(snake_case__ ) - 1 ) return arr def UpperCAmelCase__ (snake_case__ : List[Any] , snake_case__ : Any , snake_case__ : int ): """simple docstring""" if i >= h: return # If first element is smaller than the last then swap them if arr[i] > arr[h]: _snake_case , _snake_case : Tuple = arr[h], arr[i] # If there are more than 2 elements in the array if h - i + 1 > 2: _snake_case : Dict = (int)((h - i + 1) / 3 ) # Recursively sort first 2/3 elements stooge(snake_case__ , snake_case__ , (h - t) ) # Recursively sort last 2/3 elements stooge(snake_case__ , i + t , (snake_case__) ) # Recursively sort first 2/3 elements stooge(snake_case__ , snake_case__ , (h - t) ) if __name__ == "__main__": A_ = input('''Enter numbers separated by a comma:\n''').strip() A_ = [int(item) for item in user_input.split(''',''')] print(stooge_sort(unsorted))
"""simple docstring""" import importlib import json import os from collections import OrderedDict from typing import Dict, Optional, Union # Build the list of all image processors from ...configuration_utils import PretrainedConfig from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code from ...image_processing_utils import ImageProcessingMixin from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging from .auto_factory import _LazyAutoMapping from .configuration_auto import ( CONFIG_MAPPING_NAMES, AutoConfig, model_type_to_module_name, replace_list_option_in_docstrings, ) A_ = logging.get_logger(__name__) A_ = OrderedDict( [ ('''align''', '''EfficientNetImageProcessor'''), ('''beit''', '''BeitImageProcessor'''), ('''bit''', '''BitImageProcessor'''), ('''blip''', '''BlipImageProcessor'''), ('''blip-2''', '''BlipImageProcessor'''), ('''bridgetower''', '''BridgeTowerImageProcessor'''), ('''chinese_clip''', '''ChineseCLIPImageProcessor'''), ('''clip''', '''CLIPImageProcessor'''), ('''clipseg''', '''ViTImageProcessor'''), ('''conditional_detr''', '''ConditionalDetrImageProcessor'''), ('''convnext''', '''ConvNextImageProcessor'''), ('''convnextv2''', '''ConvNextImageProcessor'''), ('''cvt''', '''ConvNextImageProcessor'''), ('''data2vec-vision''', '''BeitImageProcessor'''), ('''deformable_detr''', '''DeformableDetrImageProcessor'''), ('''deit''', '''DeiTImageProcessor'''), ('''deta''', '''DetaImageProcessor'''), ('''detr''', '''DetrImageProcessor'''), ('''dinat''', '''ViTImageProcessor'''), ('''donut-swin''', '''DonutImageProcessor'''), ('''dpt''', '''DPTImageProcessor'''), ('''efficientformer''', '''EfficientFormerImageProcessor'''), ('''efficientnet''', '''EfficientNetImageProcessor'''), ('''flava''', '''FlavaImageProcessor'''), ('''focalnet''', '''BitImageProcessor'''), ('''git''', '''CLIPImageProcessor'''), ('''glpn''', '''GLPNImageProcessor'''), ('''groupvit''', '''CLIPImageProcessor'''), ('''imagegpt''', '''ImageGPTImageProcessor'''), ('''instructblip''', '''BlipImageProcessor'''), ('''layoutlmv2''', '''LayoutLMv2ImageProcessor'''), ('''layoutlmv3''', '''LayoutLMv3ImageProcessor'''), ('''levit''', '''LevitImageProcessor'''), ('''mask2former''', '''Mask2FormerImageProcessor'''), ('''maskformer''', '''MaskFormerImageProcessor'''), ('''mgp-str''', '''ViTImageProcessor'''), ('''mobilenet_v1''', '''MobileNetV1ImageProcessor'''), ('''mobilenet_v2''', '''MobileNetV2ImageProcessor'''), ('''mobilevit''', '''MobileViTImageProcessor'''), ('''mobilevit''', '''MobileViTImageProcessor'''), ('''mobilevitv2''', '''MobileViTImageProcessor'''), ('''nat''', '''ViTImageProcessor'''), ('''oneformer''', '''OneFormerImageProcessor'''), ('''owlvit''', '''OwlViTImageProcessor'''), ('''perceiver''', '''PerceiverImageProcessor'''), ('''pix2struct''', '''Pix2StructImageProcessor'''), ('''poolformer''', '''PoolFormerImageProcessor'''), ('''regnet''', '''ConvNextImageProcessor'''), ('''resnet''', '''ConvNextImageProcessor'''), ('''sam''', '''SamImageProcessor'''), ('''segformer''', '''SegformerImageProcessor'''), ('''swiftformer''', '''ViTImageProcessor'''), ('''swin''', '''ViTImageProcessor'''), ('''swin2sr''', '''Swin2SRImageProcessor'''), ('''swinv2''', '''ViTImageProcessor'''), ('''table-transformer''', '''DetrImageProcessor'''), ('''timesformer''', '''VideoMAEImageProcessor'''), ('''tvlt''', '''TvltImageProcessor'''), ('''upernet''', '''SegformerImageProcessor'''), ('''van''', '''ConvNextImageProcessor'''), ('''videomae''', 
'''VideoMAEImageProcessor'''), ('''vilt''', '''ViltImageProcessor'''), ('''vit''', '''ViTImageProcessor'''), ('''vit_hybrid''', '''ViTHybridImageProcessor'''), ('''vit_mae''', '''ViTImageProcessor'''), ('''vit_msn''', '''ViTImageProcessor'''), ('''xclip''', '''CLIPImageProcessor'''), ('''yolos''', '''YolosImageProcessor'''), ] ) A_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES) def UpperCAmelCase__ (snake_case__ : str ): """simple docstring""" for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items(): if class_name in extractors: _snake_case : Any = model_type_to_module_name(snake_case__ ) _snake_case : str = importlib.import_module(F".{module_name}" , """transformers.models""" ) try: return getattr(snake_case__ , snake_case__ ) except AttributeError: continue for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items(): if getattr(snake_case__ , """__name__""" , snake_case__ ) == class_name: return extractor # We did not fine the class, but maybe it's because a dep is missing. In that case, the class will be in the main # init and we return the proper dummy to get an appropriate error message. _snake_case : Optional[int] = importlib.import_module("""transformers""" ) if hasattr(snake_case__ , snake_case__ ): return getattr(snake_case__ , snake_case__ ) return None def UpperCAmelCase__ (snake_case__ : Union[str, os.PathLike] , snake_case__ : Optional[Union[str, os.PathLike]] = None , snake_case__ : bool = False , snake_case__ : bool = False , snake_case__ : Optional[Dict[str, str]] = None , snake_case__ : Optional[Union[bool, str]] = None , snake_case__ : Optional[str] = None , snake_case__ : bool = False , **snake_case__ : Dict , ): """simple docstring""" _snake_case : Union[str, Any] = get_file_from_repo( snake_case__ , snake_case__ , cache_dir=snake_case__ , force_download=snake_case__ , resume_download=snake_case__ , proxies=snake_case__ , use_auth_token=snake_case__ , revision=snake_case__ , local_files_only=snake_case__ , ) if resolved_config_file is None: logger.info( """Could not locate the image processor configuration file, will try to use the model config instead.""" ) return {} with open(snake_case__ , encoding="""utf-8""" ) as reader: return json.load(snake_case__ ) class lowercase: '''simple docstring''' def __init__( self: Optional[int] ): '''simple docstring''' raise EnvironmentError( """AutoImageProcessor is designed to be instantiated """ """using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.""" ) @classmethod @replace_list_option_in_docstrings(a_ ) def UpperCamelCase_ ( cls: List[str], a_: Any, **a_: Dict ): '''simple docstring''' _snake_case : Optional[int] = kwargs.pop("""config""", a_ ) _snake_case : Any = kwargs.pop("""trust_remote_code""", a_ ) _snake_case : Dict = True _snake_case , _snake_case : Optional[int] = ImageProcessingMixin.get_image_processor_dict(a_, **a_ ) _snake_case : Any = config_dict.get("""image_processor_type""", a_ ) _snake_case : Tuple = None if "AutoImageProcessor" in config_dict.get("""auto_map""", {} ): _snake_case : Any = config_dict["""auto_map"""]["""AutoImageProcessor"""] # If we still don't have the image processor class, check if we're loading from a previous feature extractor config # and if so, infer the image processor class from there. 
if image_processor_class is None and image_processor_auto_map is None: _snake_case : Optional[Any] = config_dict.pop("""feature_extractor_type""", a_ ) if feature_extractor_class is not None: logger.warning( """Could not find image processor class in the image processor config or the model config. Loading""" """ based on pattern matching with the model's feature extractor configuration.""" ) _snake_case : Optional[int] = feature_extractor_class.replace("""FeatureExtractor""", """ImageProcessor""" ) if "AutoFeatureExtractor" in config_dict.get("""auto_map""", {} ): _snake_case : Dict = config_dict["""auto_map"""]["""AutoFeatureExtractor"""] _snake_case : List[str] = feature_extractor_auto_map.replace("""FeatureExtractor""", """ImageProcessor""" ) logger.warning( """Could not find image processor auto map in the image processor config or the model config.""" """ Loading based on pattern matching with the model's feature extractor configuration.""" ) # If we don't find the image processor class in the image processor config, let's try the model config. if image_processor_class is None and image_processor_auto_map is None: if not isinstance(a_, a_ ): _snake_case : List[str] = AutoConfig.from_pretrained(a_, **a_ ) # It could be in `config.image_processor_type`` _snake_case : int = getattr(a_, """image_processor_type""", a_ ) if hasattr(a_, """auto_map""" ) and "AutoImageProcessor" in config.auto_map: _snake_case : List[Any] = config.auto_map["""AutoImageProcessor"""] if image_processor_class is not None: _snake_case : int = image_processor_class_from_name(a_ ) _snake_case : List[Any] = image_processor_auto_map is not None _snake_case : Union[str, Any] = image_processor_class is not None or type(a_ ) in IMAGE_PROCESSOR_MAPPING _snake_case : Union[str, Any] = resolve_trust_remote_code( a_, a_, a_, a_ ) if has_remote_code and trust_remote_code: _snake_case : Optional[Any] = get_class_from_dynamic_module( a_, a_, **a_ ) _snake_case : Optional[Any] = kwargs.pop("""code_revision""", a_ ) if os.path.isdir(a_ ): image_processor_class.register_for_auto_class() return image_processor_class.from_dict(a_, **a_ ) elif image_processor_class is not None: return image_processor_class.from_dict(a_, **a_ ) # Last try: we use the IMAGE_PROCESSOR_MAPPING. elif type(a_ ) in IMAGE_PROCESSOR_MAPPING: _snake_case : Union[str, Any] = IMAGE_PROCESSOR_MAPPING[type(a_ )] return image_processor_class.from_dict(a_, **a_ ) raise ValueError( f"Unrecognized image processor in {pretrained_model_name_or_path}. Should have a " f"`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following " f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}" ) @staticmethod def UpperCamelCase_ ( a_: Dict, a_: Optional[Any] ): '''simple docstring''' IMAGE_PROCESSOR_MAPPING.register(a_, a_ )
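# Typical usage sketch (the checkpoint id is illustrative): the mapping above
# lets a single entry point resolve the right image processor class from the
# hub config.
#
#   from transformers import AutoImageProcessor
#   image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")
#   inputs = image_processor(images=image, return_tensors="pt")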
"""simple docstring""" from ..utils import DummyObject, requires_backends class lowercase( metaclass=__a ): '''simple docstring''' lowercase__ = ["note_seq"] def __init__( self: Dict, *a_: Union[str, Any], **a_: List[str] ): '''simple docstring''' requires_backends(self, ["""note_seq"""] ) @classmethod def UpperCamelCase_ ( cls: Optional[int], *a_: Any, **a_: Optional[Any] ): '''simple docstring''' requires_backends(cls, ["""note_seq"""] ) @classmethod def UpperCamelCase_ ( cls: Tuple, *a_: Optional[Any], **a_: List[str] ): '''simple docstring''' requires_backends(cls, ["""note_seq"""] )
"""simple docstring""" import sys A_ = ( '''73167176531330624919225119674426574742355349194934''' '''96983520312774506326239578318016984801869478851843''' '''85861560789112949495459501737958331952853208805511''' '''12540698747158523863050715693290963295227443043557''' '''66896648950445244523161731856403098711121722383113''' '''62229893423380308135336276614282806444486645238749''' '''30358907296290491560440772390713810515859307960866''' '''70172427121883998797908792274921901699720888093776''' '''65727333001053367881220235421809751254540594752243''' '''52584907711670556013604839586446706324415722155397''' '''53697817977846174064955149290862569321978468622482''' '''83972241375657056057490261407972968652414535100474''' '''82166370484403199890008895243450658541227588666881''' '''16427171479924442928230863465674813919123162824586''' '''17866458359124566529476545682848912883142607690042''' '''24219022671055626321111109370544217506941658960408''' '''07198403850962455444362981230987879927244284909188''' '''84580156166097919133875499200524063689912560717606''' '''05886116467109405077541002256983155200055935729725''' '''71636269561882670428252483600823257530420752963450''' ) def UpperCAmelCase__ (snake_case__ : str ): """simple docstring""" _snake_case : Optional[Any] = 1 for digit in s: product *= int(snake_case__ ) return product def UpperCAmelCase__ (snake_case__ : str = N ): """simple docstring""" _snake_case : Tuple = -sys.maxsize - 1 _snake_case : Optional[int] = n[:13] _snake_case : Any = 13 while cur_index < len(snake_case__ ) - 13: if int(n[cur_index] ) >= int(substr[0] ): _snake_case : str = substr[1:] + n[cur_index] cur_index += 1 else: _snake_case : str = max(snake_case__ , str_eval(snake_case__ ) ) _snake_case : int = n[cur_index : cur_index + 13] cur_index += 13 return largest_product if __name__ == "__main__": print(F'''{solution() = }''')
"""simple docstring""" import argparse import hashlib # hashlib is only used inside the Test class import struct class lowercase: '''simple docstring''' def __init__( self: List[Any], a_: List[str] ): '''simple docstring''' _snake_case : int = data _snake_case : Dict = [0X67452301, 0Xefcdab89, 0X98badcfe, 0X10325476, 0Xc3d2e1f0] @staticmethod def UpperCamelCase_ ( a_: Optional[Any], a_: Dict ): '''simple docstring''' return ((n << b) | (n >> (32 - b))) & 0Xffffffff def UpperCamelCase_ ( self: List[Any] ): '''simple docstring''' _snake_case : Union[str, Any] = B"""\x80""" + B"""\x00""" * (63 - (len(self.data ) + 8) % 64) _snake_case : Optional[int] = self.data + padding + struct.pack(""">Q""", 8 * len(self.data ) ) return padded_data def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' return [ self.padded_data[i : i + 64] for i in range(0, len(self.padded_data ), 64 ) ] def UpperCamelCase_ ( self: Optional[Any], a_: List[Any] ): '''simple docstring''' _snake_case : List[str] = list(struct.unpack(""">16L""", a_ ) ) + [0] * 64 for i in range(16, 80 ): _snake_case : List[Any] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1 ) return w def UpperCamelCase_ ( self: int ): '''simple docstring''' _snake_case : Union[str, Any] = self.padding() _snake_case : str = self.split_blocks() for block in self.blocks: _snake_case : Any = self.expand_block(a_ ) _snake_case , _snake_case , _snake_case , _snake_case , _snake_case : Optional[int] = self.h for i in range(0, 80 ): if 0 <= i < 20: _snake_case : int = (b & c) | ((~b) & d) _snake_case : str = 0X5a827999 elif 20 <= i < 40: _snake_case : Optional[int] = b ^ c ^ d _snake_case : str = 0X6ed9eba1 elif 40 <= i < 60: _snake_case : List[Any] = (b & c) | (b & d) | (c & d) _snake_case : List[Any] = 0X8f1bbcdc elif 60 <= i < 80: _snake_case : List[Any] = b ^ c ^ d _snake_case : int = 0Xca62c1d6 _snake_case , _snake_case , _snake_case , _snake_case , _snake_case : Optional[int] = ( self.rotate(a_, 5 ) + f + e + k + expanded_block[i] & 0Xffffffff, a, self.rotate(a_, 30 ), c, d, ) _snake_case : Union[str, Any] = ( self.h[0] + a & 0Xffffffff, self.h[1] + b & 0Xffffffff, self.h[2] + c & 0Xffffffff, self.h[3] + d & 0Xffffffff, self.h[4] + e & 0Xffffffff, ) return ("{:08x}" * 5).format(*self.h ) def UpperCAmelCase__ (): """simple docstring""" _snake_case : Any = B"""Test String""" assert SHAaHash(snake_case__ ).final_hash() == hashlib.shaa(snake_case__ ).hexdigest() # noqa: S324 def UpperCAmelCase__ (): """simple docstring""" _snake_case : List[Any] = argparse.ArgumentParser(description="""Process some strings or files""" ) parser.add_argument( """--string""" , dest="""input_string""" , default="""Hello World!! Welcome to Cryptography""" , help="""Hash the string""" , ) parser.add_argument("""--file""" , dest="""input_file""" , help="""Hash contents of a file""" ) _snake_case : Union[str, Any] = parser.parse_args() _snake_case : List[Any] = args.input_string # In any case hash input should be a bytestring if args.input_file: with open(args.input_file , """rb""" ) as f: _snake_case : str = f.read() else: _snake_case : int = bytes(snake_case__ , """utf-8""" ) print(SHAaHash(snake_case__ ).final_hash() ) if __name__ == "__main__": main() import doctest doctest.testmod()
"""simple docstring""" import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging A_ = logging.get_logger(__name__) A_ = { '''BridgeTower/bridgetower-base''': '''https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json''', '''BridgeTower/bridgetower-base-itm-mlm''': ( '''https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json''' ), } class lowercase( __a ): '''simple docstring''' lowercase__ = "bridgetower_vision_model" def __init__( self: Tuple, a_: Optional[int]=768, a_: Any=12, a_: Tuple=3, a_: Optional[Any]=16, a_: Dict=288, a_: Optional[Any]=1, a_: Optional[Any]=1E-05, a_: int=False, a_: Optional[int]=True, a_: Union[str, Any]=False, **a_: Any, ): '''simple docstring''' super().__init__(**a_ ) _snake_case : Tuple = hidden_size _snake_case : List[Any] = num_hidden_layers _snake_case : List[Any] = num_channels _snake_case : Optional[int] = patch_size _snake_case : Any = image_size _snake_case : Optional[Any] = initializer_factor _snake_case : str = layer_norm_eps _snake_case : Tuple = stop_gradient _snake_case : Any = share_layernorm _snake_case : Dict = remove_last_layer @classmethod def UpperCamelCase_ ( cls: List[Any], a_: Union[str, os.PathLike], **a_: List[Any] ): '''simple docstring''' _snake_case , _snake_case : Optional[int] = cls.get_config_dict(a_, **a_ ) if config_dict.get("""model_type""" ) == "bridgetower": _snake_case : int = config_dict["""text_config"""] if "model_type" in config_dict and hasattr(cls, """model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( f"You are using a model of type {config_dict['model_type']} to instantiate a model of type " f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." 
) return cls.from_dict(a_, **a_ ) class lowercase( __a ): '''simple docstring''' lowercase__ = "bridgetower_text_model" def __init__( self: Tuple, a_: Union[str, Any]=50_265, a_: int=768, a_: Optional[Any]=12, a_: Dict=12, a_: int=1, a_: Any=3_072, a_: Tuple="gelu", a_: Union[str, Any]=0.1, a_: Union[str, Any]=0.1, a_: str=514, a_: Dict=1, a_: str=1E-05, a_: Union[str, Any]=1, a_: List[Any]=0, a_: Optional[int]=2, a_: str="absolute", a_: Union[str, Any]=True, **a_: int, ): '''simple docstring''' super().__init__(**a_ ) _snake_case : List[Any] = vocab_size _snake_case : Optional[int] = hidden_size _snake_case : Union[str, Any] = num_hidden_layers _snake_case : Tuple = num_attention_heads _snake_case : int = hidden_act _snake_case : List[str] = initializer_factor _snake_case : List[Any] = intermediate_size _snake_case : Union[str, Any] = hidden_dropout_prob _snake_case : List[Any] = attention_probs_dropout_prob _snake_case : int = max_position_embeddings _snake_case : int = type_vocab_size _snake_case : List[Any] = layer_norm_eps _snake_case : Union[str, Any] = position_embedding_type _snake_case : List[str] = use_cache _snake_case : Tuple = pad_token_id _snake_case : Dict = bos_token_id _snake_case : Any = eos_token_id @classmethod def UpperCamelCase_ ( cls: Dict, a_: Union[str, os.PathLike], **a_: Tuple ): '''simple docstring''' _snake_case , _snake_case : List[str] = cls.get_config_dict(a_, **a_ ) if config_dict.get("""model_type""" ) == "bridgetower": _snake_case : List[str] = config_dict["""text_config"""] if "model_type" in config_dict and hasattr(cls, """model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( f"You are using a model of type {config_dict['model_type']} to instantiate a model of type " f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." ) return cls.from_dict(a_, **a_ ) class lowercase( __a ): '''simple docstring''' lowercase__ = "bridgetower" def __init__( self: Union[str, Any], a_: Optional[int]=True, a_: List[Any]="gelu", a_: str=768, a_: Dict=1, a_: Optional[int]=1E-05, a_: List[str]=False, a_: str="add", a_: List[str]=12, a_: Dict=6, a_: List[Any]=False, a_: Optional[int]=False, a_: int=None, a_: Optional[Any]=None, **a_: Any, ): '''simple docstring''' _snake_case : Any = kwargs.pop("""text_config_dict""", a_ ) _snake_case : Dict = kwargs.pop("""vision_config_dict""", a_ ) super().__init__(**a_ ) _snake_case : Tuple = share_cross_modal_transformer_layers _snake_case : List[Any] = hidden_act _snake_case : Dict = hidden_size _snake_case : Any = initializer_factor _snake_case : List[Any] = layer_norm_eps _snake_case : Tuple = share_link_tower_layers _snake_case : Optional[int] = link_tower_type _snake_case : Any = num_attention_heads _snake_case : Optional[Any] = num_hidden_layers _snake_case : List[str] = tie_word_embeddings _snake_case : Any = init_layernorm_from_vision_encoder if text_config is None: _snake_case : Any = {} logger.info("""`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.""" ) if vision_config is None: _snake_case : Optional[int] = {} logger.info("""`vision_config` is `None`. 
Initializing the `BridgeTowerVisionConfig` with default values.""" ) _snake_case : int = BridgeTowerTextConfig(**a_ ) _snake_case : List[str] = BridgeTowerVisionConfig(**a_ ) @classmethod def UpperCamelCase_ ( cls: Union[str, Any], a_: BridgeTowerTextConfig, a_: BridgeTowerVisionConfig, **a_: Dict ): '''simple docstring''' return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **a_ ) def UpperCamelCase_ ( self: int ): '''simple docstring''' _snake_case : List[str] = copy.deepcopy(self.__dict__ ) _snake_case : Dict = self.text_config.to_dict() _snake_case : List[str] = self.vision_config.to_dict() _snake_case : List[str] = self.__class__.model_type return output
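# Minimal instantiation sketch (default values; the class name follows the
# upstream transformers naming for BridgeTower):
#
#   config = BridgeTowerConfig()
#   print(config.text_config.vocab_size, config.vision_config.patch_size)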
"""simple docstring""" import copy from ...configuration_utils import PretrainedConfig from ...utils import add_start_docstrings A_ = r''' [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: title_sep (`str`, *optional*, defaults to `" / "`): Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`]. doc_sep (`str`, *optional*, defaults to `" // "`): Separator inserted between the text of the retrieved document and the original input when calling [`RagRetriever`]. n_docs (`int`, *optional*, defaults to 5): Number of documents to retrieve. max_combined_length (`int`, *optional*, defaults to 300): Max length of contextualized input returned by [`~RagRetriever.__call__`]. retrieval_vector_size (`int`, *optional*, defaults to 768): Dimensionality of the document embeddings indexed by [`RagRetriever`]. retrieval_batch_size (`int`, *optional*, defaults to 8): Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated [`RagRetriever`]. dataset (`str`, *optional*, defaults to `"wiki_dpr"`): A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids using `datasets.list_datasets()`). dataset_split (`str`, *optional*, defaults to `"train"`) Which split of the `dataset` to load. index_name (`str`, *optional*, defaults to `"compressed"`) The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and `"compressed"`. index_path (`str`, *optional*) The path to the serialized faiss index on disk. passages_path (`str`, *optional*): A path to text passages compatible with the faiss index. Required if using [`~models.rag.retrieval_rag.LegacyIndex`] use_dummy_dataset (`bool`, *optional*, defaults to `False`) Whether to load a "dummy" variant of the dataset specified by `dataset`. label_smoothing (`float`, *optional*, defaults to 0.0): Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing in the loss calculation. If set to 0, no label smoothing is performed. do_marginalize (`bool`, *optional*, defaults to `False`): If `True`, the logits are marginalized over all documents by making use of `torch.nn.functional.log_softmax`. reduce_loss (`bool`, *optional*, defaults to `False`): Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation. do_deduplication (`bool`, *optional*, defaults to `True`): Whether or not to deduplicate the generations from different context documents for a given input. Has to be set to `False` if used while training with distributed backend. exclude_bos_score (`bool`, *optional*, defaults to `False`): Whether or not to disregard the BOS token when computing the loss. output_retrieved(`bool`, *optional*, defaults to `False`): If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and `context_attention_mask` are returned. See returned tensors for more detail. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). forced_eos_token_id (`int`, *optional*): The id of the token to force as the last generated token when `max_length` is reached. Usually set to `eos_token_id`. 
''' @add_start_docstrings(__a ) class lowercase( __a ): '''simple docstring''' lowercase__ = "rag" lowercase__ = True def __init__( self: Union[str, Any], a_: int=None, a_: Tuple=True, a_: Optional[int]=None, a_: List[str]=None, a_: int=None, a_: Optional[Any]=None, a_: List[str]=None, a_: Optional[Any]=" / ", a_: Tuple=" // ", a_: List[Any]=5, a_: Dict=300, a_: Tuple=768, a_: Optional[Any]=8, a_: int="wiki_dpr", a_: Any="train", a_: Optional[int]="compressed", a_: Optional[int]=None, a_: List[Any]=None, a_: Optional[Any]=False, a_: str=False, a_: Dict=0.0, a_: Union[str, Any]=True, a_: Union[str, Any]=False, a_: str=False, a_: List[str]=False, a_: Union[str, Any]=True, a_: Any=None, **a_: List[Any], ): '''simple docstring''' super().__init__( bos_token_id=a_, pad_token_id=a_, eos_token_id=a_, decoder_start_token_id=a_, forced_eos_token_id=a_, is_encoder_decoder=a_, prefix=a_, vocab_size=a_, **a_, ) assert ( "question_encoder" in kwargs and "generator" in kwargs ), "Config has to be initialized with question_encoder and generator config" _snake_case : Union[str, Any] = kwargs.pop("""question_encoder""" ) _snake_case : List[str] = question_encoder_config.pop("""model_type""" ) _snake_case : Union[str, Any] = kwargs.pop("""generator""" ) _snake_case : Any = decoder_config.pop("""model_type""" ) from ..auto.configuration_auto import AutoConfig _snake_case : Union[str, Any] = AutoConfig.for_model(a_, **a_ ) _snake_case : Optional[Any] = AutoConfig.for_model(a_, **a_ ) _snake_case : Any = reduce_loss _snake_case : Optional[int] = label_smoothing _snake_case : Dict = exclude_bos_score _snake_case : int = do_marginalize _snake_case : Optional[Any] = title_sep _snake_case : Any = doc_sep _snake_case : List[str] = n_docs _snake_case : Tuple = max_combined_length _snake_case : Optional[Any] = dataset _snake_case : Union[str, Any] = dataset_split _snake_case : Tuple = index_name _snake_case : Any = retrieval_vector_size _snake_case : Union[str, Any] = retrieval_batch_size _snake_case : str = passages_path _snake_case : Tuple = index_path _snake_case : List[Any] = use_dummy_dataset _snake_case : Optional[Any] = output_retrieved _snake_case : Tuple = do_deduplication _snake_case : Union[str, Any] = use_cache if self.forced_eos_token_id is None: _snake_case : Dict = getattr(self.generator, """forced_eos_token_id""", a_ ) @classmethod def UpperCamelCase_ ( cls: Any, a_: PretrainedConfig, a_: PretrainedConfig, **a_: Optional[Any] ): '''simple docstring''' return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **a_ ) def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' _snake_case : Optional[int] = copy.deepcopy(self.__dict__ ) _snake_case : List[str] = self.question_encoder.to_dict() _snake_case : Tuple = self.generator.to_dict() _snake_case : Dict = self.__class__.model_type return output
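# Construction sketch (checkpoint ids are illustrative): a RAG config is
# usually assembled from the two sub-model configs rather than built directly:
#
#   from transformers import AutoConfig, RagConfig
#   rag_config = RagConfig.from_question_encoder_generator_configs(
#       AutoConfig.from_pretrained("facebook/dpr-question_encoder-single-nq-base"),
#       AutoConfig.from_pretrained("facebook/bart-large"),
#   )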
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available A_ = { '''configuration_longt5''': ['''LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LongT5Config''', '''LongT5OnnxConfig'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ = [ '''LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST''', '''LongT5EncoderModel''', '''LongT5ForConditionalGeneration''', '''LongT5Model''', '''LongT5PreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ = [ '''FlaxLongT5ForConditionalGeneration''', '''FlaxLongT5Model''', '''FlaxLongT5PreTrainedModel''', ] if TYPE_CHECKING: from .configuration_longta import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongTaConfig, LongTaOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_longta import ( LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST, LongTaEncoderModel, LongTaForConditionalGeneration, LongTaModel, LongTaPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_longta import ( FlaxLongTaForConditionalGeneration, FlaxLongTaModel, FlaxLongTaPreTrainedModel, ) else: import sys A_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring""" import os from typing import Dict, List, Tuple, TypeVar, Union A_ = TypeVar('''T''') A_ = Union[List[T], Tuple[T, ...]] A_ = Union[T, List[T], Dict[str, T]] A_ = Union[str, bytes, os.PathLike]
"""simple docstring""" import argparse import os # New Code # import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils import find_executable_batch_size ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to ensure out-of-memory errors never # interrupt training, and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## A_ = 16 A_ = 32 def UpperCAmelCase__ (snake_case__ : Accelerator , snake_case__ : int = 16 ): """simple docstring""" _snake_case : Optional[Any] = AutoTokenizer.from_pretrained("""bert-base-cased""" ) _snake_case : Any = load_dataset("""glue""" , """mrpc""" ) def tokenize_function(snake_case__ : Any ): # max_length=None => use the model max length (it's actually the default) _snake_case : Any = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=snake_case__ , max_length=snake_case__ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): _snake_case : List[Any] = datasets.map( snake_case__ , batched=snake_case__ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library _snake_case : int = tokenized_datasets.rename_column("""label""" , """labels""" ) def collate_fn(snake_case__ : int ): # On TPU it's best to pad everything to the same length or training will be very slow. _snake_case : Optional[int] = 1_28 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": _snake_case : str = 16 elif accelerator.mixed_precision != "no": _snake_case : Optional[int] = 8 else: _snake_case : Optional[int] = None return tokenizer.pad( snake_case__ , padding="""longest""" , max_length=snake_case__ , pad_to_multiple_of=snake_case__ , return_tensors="""pt""" , ) # Instantiate dataloaders. 
_snake_case : Optional[int] = DataLoader( tokenized_datasets["""train"""] , shuffle=snake_case__ , collate_fn=snake_case__ , batch_size=snake_case__ ) _snake_case : Dict = DataLoader( tokenized_datasets["""validation"""] , shuffle=snake_case__ , collate_fn=snake_case__ , batch_size=snake_case__ ) return train_dataloader, eval_dataloader # For testing only if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1": from accelerate.test_utils.training import mocked_dataloaders A_ = mocked_dataloaders # noqa: F811 def UpperCAmelCase__ (snake_case__ : List[Any] , snake_case__ : Any ): """simple docstring""" if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , snake_case__ ) == "1": _snake_case : List[Any] = 2 # Initialize accelerator _snake_case : str = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs _snake_case : Tuple = config["""lr"""] _snake_case : str = int(config["""num_epochs"""] ) _snake_case : Union[str, Any] = int(config["""seed"""] ) _snake_case : Union[str, Any] = int(config["""batch_size"""] ) _snake_case : List[str] = evaluate.load("""glue""" , """mrpc""" ) # New Code # # We now can define an inner training loop function. It should take a batch size as the only parameter, # and build the dataloaders in there. # It also gets our decorator @find_executable_batch_size(starting_batch_size=snake_case__ ) def inner_training_loop(snake_case__ : Union[str, Any] ): # And now just move everything below under this function # We need to bring in the Accelerator object from earlier nonlocal accelerator # And reset all of its attributes that could hold onto any memory: accelerator.free_memory() # Then we can declare the model, optimizer, and everything else: set_seed(snake_case__ ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) _snake_case : List[Any] = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=snake_case__ ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). _snake_case : Tuple = model.to(accelerator.device ) # Instantiate optimizer _snake_case : str = AdamW(params=model.parameters() , lr=snake_case__ ) _snake_case , _snake_case : Optional[int] = get_dataloaders(snake_case__ , snake_case__ ) # Instantiate scheduler _snake_case : str = get_linear_schedule_with_warmup( optimizer=snake_case__ , num_warmup_steps=1_00 , num_training_steps=(len(snake_case__ ) * num_epochs) , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. _snake_case , _snake_case , _snake_case , _snake_case , _snake_case : List[str] = accelerator.prepare( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) # Now we train the model for epoch in range(snake_case__ ): model.train() for step, batch in enumerate(snake_case__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) _snake_case : int = model(**snake_case__ ) _snake_case : str = outputs.loss accelerator.backward(snake_case__ ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(snake_case__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): _snake_case : int = model(**snake_case__ ) _snake_case : Optional[Any] = outputs.logits.argmax(dim=-1 ) _snake_case , _snake_case : Tuple = accelerator.gather_for_metrics((predictions, batch["""labels"""]) ) metric.add_batch( predictions=snake_case__ , references=snake_case__ , ) _snake_case : str = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F"epoch {epoch}:" , snake_case__ ) # New Code # # And call it at the end with no arguments # Note: You could also refactor this outside of your training loop function inner_training_loop() def UpperCAmelCase__ (): """simple docstring""" _snake_case : Any = argparse.ArgumentParser(description="""Simple example of training script.""" ) parser.add_argument( """--mixed_precision""" , type=snake_case__ , default=snake_case__ , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose""" """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.""" """and an Nvidia Ampere GPU.""" , ) parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" ) _snake_case : Dict = parser.parse_args() _snake_case : int = {"""lr""": 2e-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16} training_function(snake_case__ , snake_case__ ) if __name__ == "__main__": main()
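# Launch sketch: like the other accelerate examples, this script is meant to
# be started through the CLI so the distributed environment is configured (the
# file name is hypothetical):
#
#   accelerate launch memory.py --mixed_precision fp16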
"""simple docstring""" def UpperCAmelCase__ (snake_case__ : list ): """simple docstring""" if len(snake_case__ ) <= 1: return [tuple(snake_case__ )] _snake_case : List[Any] = [] def generate(snake_case__ : int , snake_case__ : list ): if k == 1: res.append(tuple(arr[:] ) ) return generate(k - 1 , snake_case__ ) for i in range(k - 1 ): if k % 2 == 0: # k is even _snake_case , _snake_case : Optional[Any] = arr[k - 1], arr[i] else: # k is odd _snake_case , _snake_case : List[str] = arr[k - 1], arr[0] generate(k - 1 , snake_case__ ) generate(len(snake_case__ ) , snake_case__ ) return res if __name__ == "__main__": A_ = input('''Enter numbers separated by a comma:\n''').strip() A_ = [int(item) for item in user_input.split(''',''')] print(heaps(arr))
"""simple docstring""" def UpperCAmelCase__ (snake_case__ : Tuple ): """simple docstring""" _snake_case : Any = len(snake_case__ ) for i in range(length - 1 ): _snake_case : Dict = i for k in range(i + 1 , snake_case__ ): if collection[k] < collection[least]: _snake_case : Optional[Any] = k if least != i: _snake_case , _snake_case : List[str] = (collection[i], collection[least]) return collection if __name__ == "__main__": A_ = input('''Enter numbers separated by a comma:\n''').strip() A_ = [int(item) for item in user_input.split(''',''')] print(selection_sort(unsorted))
"""simple docstring""" from math import factorial A_ = {str(d): factorial(d) for d in range(10)} def UpperCAmelCase__ (snake_case__ : int ): """simple docstring""" return sum(DIGIT_FACTORIAL[d] for d in str(snake_case__ ) ) def UpperCAmelCase__ (): """simple docstring""" _snake_case : List[str] = 7 * factorial(9 ) + 1 return sum(i for i in range(3 , snake_case__ ) if sum_of_digit_factorial(snake_case__ ) == i ) if __name__ == "__main__": print(F'''{solution() = }''')
"""simple docstring""" import unittest from accelerate import debug_launcher from accelerate.test_utils import require_cpu, test_ops, test_script @require_cpu class lowercase( unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self: Any ): '''simple docstring''' debug_launcher(test_script.main ) def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' debug_launcher(test_ops.main )
"""simple docstring""" from __future__ import annotations def UpperCAmelCase__ (snake_case__ : list[int] , snake_case__ : int ): """simple docstring""" if len(snake_case__ ) < k or k < 0: raise ValueError("""Invalid Input""" ) _snake_case : Optional[int] = sum(array[:k] ) for i in range(len(snake_case__ ) - k ): _snake_case : Optional[Any] = current_sum - array[i] + array[i + k] _snake_case : List[str] = max(snake_case__ , snake_case__ ) return max_sum if __name__ == "__main__": from doctest import testmod from random import randint testmod() A_ = [randint(-10_00, 10_00) for i in range(1_00)] A_ = randint(0, 1_10) print(F'''The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}''')