code
stringlengths
87
55.2k
code_codestyle
int64
0
349
style_context
stringlengths
135
49.1k
style_context_codestyle
int64
0
349
label
int64
0
1
import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST, OpenAIGPTConfig, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification, OpenAIGPTLMHeadModel, OpenAIGPTModel, ) class _lowerCAmelCase : def __init__( self , _UpperCamelCase , _UpperCamelCase=13 , _UpperCamelCase=7 , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=99 , _UpperCamelCase=32 , _UpperCamelCase=5 , _UpperCamelCase=4 , _UpperCamelCase=37 , _UpperCamelCase="gelu" , _UpperCamelCase=0.1 , _UpperCamelCase=0.1 , _UpperCamelCase=512 , _UpperCamelCase=16 , _UpperCamelCase=2 , _UpperCamelCase=0.02 , _UpperCamelCase=3 , _UpperCamelCase=4 , _UpperCamelCase=None , ) -> Dict: lowerCAmelCase_ = parent lowerCAmelCase_ = batch_size lowerCAmelCase_ = seq_length lowerCAmelCase_ = is_training lowerCAmelCase_ = use_token_type_ids lowerCAmelCase_ = use_labels lowerCAmelCase_ = vocab_size lowerCAmelCase_ = hidden_size lowerCAmelCase_ = num_hidden_layers lowerCAmelCase_ = num_attention_heads lowerCAmelCase_ = intermediate_size lowerCAmelCase_ = hidden_act lowerCAmelCase_ = hidden_dropout_prob lowerCAmelCase_ = attention_probs_dropout_prob lowerCAmelCase_ = max_position_embeddings lowerCAmelCase_ = type_vocab_size lowerCAmelCase_ = type_sequence_label_size lowerCAmelCase_ = initializer_range lowerCAmelCase_ = num_labels lowerCAmelCase_ = num_choices lowerCAmelCase_ = scope lowerCAmelCase_ = self.vocab_size - 1 def __a ( self ) -> List[str]: lowerCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCAmelCase_ = None if 
self.use_token_type_ids: lowerCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowerCAmelCase_ = None lowerCAmelCase_ = None lowerCAmelCase_ = None if self.use_labels: lowerCAmelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCAmelCase_ = ids_tensor([self.batch_size] , self.num_choices ) lowerCAmelCase_ = OpenAIGPTConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , ) lowerCAmelCase_ = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 ) return ( config, input_ids, head_mask, token_type_ids, sequence_labels, token_labels, choice_labels, ) def __a ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , *_UpperCamelCase ) -> Tuple: lowerCAmelCase_ = OpenAIGPTModel(config=_UpperCamelCase ) model.to(_UpperCamelCase ) model.eval() lowerCAmelCase_ = model(_UpperCamelCase , token_type_ids=_UpperCamelCase , head_mask=_UpperCamelCase ) lowerCAmelCase_ = model(_UpperCamelCase , token_type_ids=_UpperCamelCase ) lowerCAmelCase_ = model(_UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __a ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , *_UpperCamelCase ) -> Union[str, Any]: lowerCAmelCase_ = OpenAIGPTLMHeadModel(_UpperCamelCase ) model.to(_UpperCamelCase ) model.eval() lowerCAmelCase_ = model(_UpperCamelCase , token_type_ids=_UpperCamelCase , labels=_UpperCamelCase ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __a ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , *_UpperCamelCase ) -> Any: 
lowerCAmelCase_ = OpenAIGPTDoubleHeadsModel(_UpperCamelCase ) model.to(_UpperCamelCase ) model.eval() lowerCAmelCase_ = model(_UpperCamelCase , token_type_ids=_UpperCamelCase , labels=_UpperCamelCase ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __a ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , *_UpperCamelCase ) -> Optional[Any]: lowerCAmelCase_ = self.num_labels lowerCAmelCase_ = OpenAIGPTForSequenceClassification(_UpperCamelCase ) model.to(_UpperCamelCase ) model.eval() lowerCAmelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCAmelCase_ = model(_UpperCamelCase , token_type_ids=_UpperCamelCase , labels=_UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __a ( self ) -> List[str]: lowerCAmelCase_ = self.prepare_config_and_inputs() ( lowerCAmelCase_ ) = config_and_inputs lowerCAmelCase_ = { """input_ids""": input_ids, """token_type_ids""": token_type_ids, """head_mask""": head_mask, } return config, inputs_dict @require_torch class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ): _lowercase =( (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification) if is_torch_available() else () ) _lowercase =( (OpenAIGPTLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly _lowercase =( { """feature-extraction""": OpenAIGPTModel, """text-classification""": OpenAIGPTForSequenceClassification, """text-generation""": OpenAIGPTLMHeadModel, """zero-shot""": OpenAIGPTForSequenceClassification, } if is_torch_available() else {} ) def __a ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> List[Any]: if pipeline_test_casse_name == 
"ZeroShotClassificationPipelineTests": # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers. # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a # tiny config could not be created. return True return False def __a ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=False ) -> Union[str, Any]: lowerCAmelCase_ = super()._prepare_for_class(_UpperCamelCase , _UpperCamelCase , return_labels=_UpperCamelCase ) if return_labels: if model_class.__name__ == "OpenAIGPTDoubleHeadsModel": lowerCAmelCase_ = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=_UpperCamelCase , ) lowerCAmelCase_ = inputs_dict["""labels"""] lowerCAmelCase_ = inputs_dict["""labels"""] lowerCAmelCase_ = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=_UpperCamelCase , ) lowerCAmelCase_ = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=_UpperCamelCase ) return inputs_dict def __a ( self ) -> Union[str, Any]: lowerCAmelCase_ = OpenAIGPTModelTester(self ) lowerCAmelCase_ = ConfigTester(self , config_class=_UpperCamelCase , n_embd=37 ) def __a ( self ) -> Union[str, Any]: self.config_tester.run_common_tests() def __a ( self ) -> Any: lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_model(*_UpperCamelCase ) def __a ( self ) -> Any: lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*_UpperCamelCase ) def __a ( self ) -> Tuple: lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_double_lm_head_model(*_UpperCamelCase ) def __a ( self ) -> Any: lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*_UpperCamelCase ) @slow def __a ( self ) -> List[str]: for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCAmelCase_ = OpenAIGPTModel.from_pretrained(_UpperCamelCase ) self.assertIsNotNone(_UpperCamelCase ) @require_torch class _lowerCAmelCase ( unittest.TestCase ): @slow def __a ( self ) -> str: lowerCAmelCase_ = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt" ) model.to(_UpperCamelCase ) lowerCAmelCase_ = torch.tensor([[481, 4_735, 544]] , dtype=torch.long , device=_UpperCamelCase ) # the president is lowerCAmelCase_ = [ 481, 4_735, 544, 246, 963, 870, 762, 239, 244, 40_477, 244, 249, 719, 881, 487, 544, 240, 244, 603, 481, ] # the president is a very good man. " \n " i\'m sure he is, " said the lowerCAmelCase_ = model.generate(_UpperCamelCase , do_sample=_UpperCamelCase ) self.assertListEqual(output_ids[0].tolist() , _UpperCamelCase )
231
import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCamelCase =logging.get_logger(__name__) _lowerCamelCase ={ """BridgeTower/bridgetower-base""": """https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json""", """BridgeTower/bridgetower-base-itm-mlm""": ( """https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json""" ), } class A__ ( __SCREAMING_SNAKE_CASE): _UpperCAmelCase : Dict = """bridgetower_vision_model""" def __init__( self , __magic_name__=7_6_8 , __magic_name__=1_2 , __magic_name__=3 , __magic_name__=1_6 , __magic_name__=2_8_8 , __magic_name__=1 , __magic_name__=1e-05 , __magic_name__=False , __magic_name__=True , __magic_name__=False , **__magic_name__ , ): super().__init__(**__magic_name__ ) lowerCamelCase : Dict = hidden_size lowerCamelCase : str = num_hidden_layers lowerCamelCase : Optional[int] = num_channels lowerCamelCase : List[str] = patch_size lowerCamelCase : Tuple = image_size lowerCamelCase : Any = initializer_factor lowerCamelCase : Tuple = layer_norm_eps lowerCamelCase : Tuple = stop_gradient lowerCamelCase : Optional[int] = share_layernorm lowerCamelCase : str = remove_last_layer @classmethod def UpperCamelCase__ ( cls , __magic_name__ , **__magic_name__ ): lowerCamelCase , lowerCamelCase : int = cls.get_config_dict(__magic_name__ , **__magic_name__ ) if config_dict.get("""model_type""" ) == "bridgetower": lowerCamelCase : str = config_dict["""text_config"""] if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type ''' F'''{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(__magic_name__ , **__magic_name__ ) class A__ ( __SCREAMING_SNAKE_CASE): _UpperCAmelCase : Union[str, Any] = """bridgetower_text_model""" def __init__( self , __magic_name__=5_0_2_6_5 , __magic_name__=7_6_8 , __magic_name__=1_2 , __magic_name__=1_2 , __magic_name__=1 , __magic_name__=3_0_7_2 , __magic_name__="gelu" , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=5_1_4 , __magic_name__=1 , __magic_name__=1e-05 , __magic_name__=1 , __magic_name__=0 , __magic_name__=2 , __magic_name__="absolute" , __magic_name__=True , **__magic_name__ , ): super().__init__(**__magic_name__ ) lowerCamelCase : int = vocab_size lowerCamelCase : int = hidden_size lowerCamelCase : Any = num_hidden_layers lowerCamelCase : Union[str, Any] = num_attention_heads lowerCamelCase : Tuple = hidden_act lowerCamelCase : Optional[int] = initializer_factor lowerCamelCase : Any = intermediate_size lowerCamelCase : List[str] = hidden_dropout_prob lowerCamelCase : Dict = attention_probs_dropout_prob lowerCamelCase : str = max_position_embeddings lowerCamelCase : Union[str, Any] = type_vocab_size lowerCamelCase : Optional[int] = layer_norm_eps lowerCamelCase : Optional[int] = position_embedding_type lowerCamelCase : List[str] = use_cache lowerCamelCase : List[str] = pad_token_id lowerCamelCase : List[str] = bos_token_id lowerCamelCase : Optional[int] = eos_token_id @classmethod def UpperCamelCase__ ( cls , __magic_name__ , **__magic_name__ ): lowerCamelCase , lowerCamelCase : int = cls.get_config_dict(__magic_name__ , **__magic_name__ ) if config_dict.get("""model_type""" ) == "bridgetower": lowerCamelCase : Optional[int] = config_dict["""text_config"""] if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type ''' 
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(__magic_name__ , **__magic_name__ ) class A__ ( __SCREAMING_SNAKE_CASE): _UpperCAmelCase : Dict = """bridgetower""" def __init__( self , __magic_name__=True , __magic_name__="gelu" , __magic_name__=7_6_8 , __magic_name__=1 , __magic_name__=1e-05 , __magic_name__=False , __magic_name__="add" , __magic_name__=1_2 , __magic_name__=6 , __magic_name__=False , __magic_name__=False , __magic_name__=None , __magic_name__=None , **__magic_name__ , ): # TODO: remove this once the Hub files are updated. lowerCamelCase : int = kwargs.pop("""text_config_dict""" , __magic_name__ ) lowerCamelCase : str = kwargs.pop("""vision_config_dict""" , __magic_name__ ) super().__init__(**__magic_name__ ) lowerCamelCase : str = share_cross_modal_transformer_layers lowerCamelCase : Union[str, Any] = hidden_act lowerCamelCase : str = hidden_size lowerCamelCase : Tuple = initializer_factor lowerCamelCase : List[str] = layer_norm_eps lowerCamelCase : int = share_link_tower_layers lowerCamelCase : List[Any] = link_tower_type lowerCamelCase : Tuple = num_attention_heads lowerCamelCase : int = num_hidden_layers lowerCamelCase : Union[str, Any] = tie_word_embeddings lowerCamelCase : Tuple = init_layernorm_from_vision_encoder if text_config is None: lowerCamelCase : Any = {} logger.info("""`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.""" ) if vision_config is None: lowerCamelCase : int = {} logger.info("""`vision_config` is `None`. 
Initializing the `BridgeTowerVisionConfig` with default values.""" ) lowerCamelCase : Any = BridgeTowerTextConfig(**__magic_name__ ) lowerCamelCase : Optional[Any] = BridgeTowerVisionConfig(**__magic_name__ ) @classmethod def UpperCamelCase__ ( cls , __magic_name__ , __magic_name__ , **__magic_name__ ): return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **__magic_name__ ) def UpperCamelCase__ ( self ): lowerCamelCase : str = copy.deepcopy(self.__dict__ ) lowerCamelCase : int = self.text_config.to_dict() lowerCamelCase : Dict = self.vision_config.to_dict() lowerCamelCase : List[str] = self.__class__.model_type return output
287
0
'''simple docstring''' import collections import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging __a: List[str] = logging.get_logger(__name__) __a: Optional[int] = """▁""" __a: Any = {"""vocab_file""": """prophetnet.tokenizer"""} __a: List[Any] = { """vocab_file""": { """microsoft/xprophetnet-large-wiki100-cased""": ( """https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer""" ), } } __a: Tuple = { """microsoft/xprophetnet-large-wiki100-cased""": {"""do_lower_case""": False}, } __a: List[Any] = { """microsoft/xprophetnet-large-wiki100-cased""": 5_12, } def __UpperCamelCase ( UpperCAmelCase ): lowercase__ : str = collections.OrderedDict() with open(UpperCAmelCase , '''r''' , encoding='''utf-8''' ) as reader: lowercase__ : int = reader.readlines() for index, token in enumerate(UpperCAmelCase ): lowercase__ : List[Any] = token.rstrip('''\n''' ) lowercase__ : List[str] = index return vocab class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE = ["""input_ids""", """attention_mask"""] def __init__( self , __lowerCAmelCase , __lowerCAmelCase="[SEP]" , __lowerCAmelCase="[SEP]" , __lowerCAmelCase="[SEP]" , __lowerCAmelCase="[UNK]" , __lowerCAmelCase="[PAD]" , __lowerCAmelCase="[CLS]" , __lowerCAmelCase="[MASK]" , __lowerCAmelCase = None , **__lowerCAmelCase , ) -> str: lowercase__ : Tuple = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=__lowerCAmelCase , eos_token=__lowerCAmelCase , sep_token=__lowerCAmelCase , unk_token=__lowerCAmelCase , pad_token=__lowerCAmelCase , cls_token=__lowerCAmelCase , mask_token=__lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCAmelCase , ) try: 
import sentencepiece as spm except ImportError: logger.warning( '''You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece''' ''' pip install sentencepiece''' ) raise lowercase__ : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(__lowerCAmelCase ) ) lowercase__ : Optional[int] = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a' # put special tokens and [unused] tokens into the vocab lowercase__ : Any = {"""[PAD]""": 0, """[CLS]""": 1, """[SEP]""": 2, """[UNK]""": 3, """[MASK]""": 4} for i in range(10 ): lowercase__ : Dict = F"""[unused{i}]""" lowercase__ : List[Any] = 5 + i # The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab lowercase__ : Any = 12 lowercase__ : Optional[int] = {v: k for k, v in self.fairseq_tokens_to_ids.items()} for k in self.fairseq_tokens_to_ids.keys(): self.unique_no_split_tokens.append(__lowerCAmelCase ) def __getstate__( self ) -> int: lowercase__ : List[Any] = self.__dict__.copy() lowercase__ : Union[str, Any] = None return state def __setstate__( self , __lowerCAmelCase ) -> Union[str, Any]: lowercase__ : str = d try: import sentencepiece as spm except ImportError: logger.warning( '''You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece''' ''' pip install sentencepiece''' ) raise # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): lowercase__ : int = {} lowercase__ : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase 
= None , __lowerCAmelCase = False ) -> Optional[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__lowerCAmelCase , token_ids_a=__lowerCAmelCase , already_has_special_tokens=__lowerCAmelCase ) if token_ids_a is None: return ([0] * len(__lowerCAmelCase )) + [1] return ([0] * len(__lowerCAmelCase )) + [1] + ([0] * len(__lowerCAmelCase )) + [1] def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase = None ) -> Dict: lowercase__ : Tuple = [self.sep_token_id] if token_ids_a is None: return len(token_ids_a + sep ) * [0] return len(token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def _lowerCAmelCase( self ) -> Tuple: return len(self.sp_model ) + self.fairseq_offset def _lowerCAmelCase( self ) -> int: lowercase__ : List[str] = {self.convert_ids_to_tokens(__lowerCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def _lowerCAmelCase( self , __lowerCAmelCase ) -> Union[str, Any]: return self.sp_model.encode(__lowerCAmelCase , out_type=__lowerCAmelCase ) def _lowerCAmelCase( self , __lowerCAmelCase ) -> int: if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] lowercase__ : List[str] = self.sp_model.PieceToId(__lowerCAmelCase ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def _lowerCAmelCase( self , __lowerCAmelCase ) -> Any: if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def _lowerCAmelCase( self , __lowerCAmelCase ) -> List[str]: lowercase__ : Any = """""".join(__lowerCAmelCase ).replace(__lowerCAmelCase , ''' ''' ).strip() return out_string def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase = None ) -> Optional[Any]: if not os.path.isdir(__lowerCAmelCase ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return 
lowercase__ : List[Any] = os.path.join( __lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __lowerCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(__lowerCAmelCase , '''wb''' ) as fi: lowercase__ : Optional[Any] = self.sp_model.serialized_model_proto() fi.write(__lowerCAmelCase ) return (out_vocab_file,) def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase = None ) -> Optional[int]: if token_ids_a is None: return token_ids_a + [self.sep_token_id] lowercase__ : List[str] = [self.sep_token_id] return token_ids_a + sep + token_ids_a + sep
198
def _a ( lowerCamelCase = 100_0000 ): lowerCamelCase : Any = set(range(3, lowerCamelCase, 2 ) ) primes.add(2 ) for p in range(3, lowerCamelCase, 2 ): if p not in primes: continue primes.difference_update(set(range(p * p, lowerCamelCase, lowerCamelCase ) ) ) lowerCamelCase : Any = [float(lowerCamelCase ) for n in range(limit + 1 )] for p in primes: for n in range(lowerCamelCase, limit + 1, lowerCamelCase ): phi[n] *= 1 - 1 / p return int(sum(phi[2:] ) ) if __name__ == "__main__": print(f'''{solution() = }''')
287
0
"""simple docstring""" import requests from bsa import BeautifulSoup def lowercase ( A_ = "AAPL" )-> Any: '''simple docstring''' a : List[str] = F'''https://in.finance.yahoo.com/quote/{symbol}?s={symbol}''' a : int = BeautifulSoup(requests.get(A_ ).text , "html.parser" ) a : int = """My(6px) Pos(r) smartphone_Mt(6px)""" return soup.find("div" , class_=class_ ).find("span" ).text if __name__ == "__main__": for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split(): print(f'''Current {symbol:<4} stock price is {stock_price(symbol):>8}''')
40
from __future__ import annotations import collections import pprint from pathlib import Path def _a ( lowerCamelCase ): return "".join(sorted(lowerCamelCase ) ) def _a ( lowerCamelCase ): return word_by_signature[signature(lowerCamelCase )] _lowerCamelCase =Path(__file__).parent.joinpath("""words.txt""").read_text(encoding="""utf-8""") _lowerCamelCase =sorted({word.strip().lower() for word in data.splitlines()}) _lowerCamelCase =collections.defaultdict(list) for word in word_list: word_by_signature[signature(word)].append(word) if __name__ == "__main__": _lowerCamelCase ={word: anagram(word) for word in word_list if len(anagram(word)) > 1} with open("""anagrams.txt""", """w""") as file: file.write("""all_anagrams = \n """) file.write(pprint.pformat(all_anagrams))
287
0
"""simple docstring""" class __lowerCamelCase : '''simple docstring''' def __init__( self , __UpperCAmelCase ) -> Any: _a = size _a = [0] * size _a = [0] * size @staticmethod def _UpperCAmelCase ( __UpperCAmelCase ) -> List[Any]: return index | (index + 1) @staticmethod def _UpperCAmelCase ( __UpperCAmelCase ) -> Dict: return (index & (index + 1)) - 1 def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ) -> Dict: _a = value while index < self.size: _a = self.get_prev(__UpperCAmelCase ) + 1 if current_left_border == index: _a = value else: _a = max(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) _a = self.get_next(__UpperCAmelCase ) def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[int]: right -= 1 # Because of right is exclusive _a = 0 while left <= right: _a = self.get_prev(__UpperCAmelCase ) if left <= current_left: _a = max(__UpperCAmelCase , self.tree[right] ) _a = current_left else: _a = max(__UpperCAmelCase , self.arr[right] ) right -= 1 return result if __name__ == "__main__": import doctest doctest.testmod()
320
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL _lowerCamelCase =logging.get_logger(__name__) class A__ ( __SCREAMING_SNAKE_CASE): _UpperCAmelCase : Union[str, Any] = ["""pixel_values"""] def __init__( self , __magic_name__ = True , __magic_name__ = None , __magic_name__ = None , __magic_name__ = PILImageResampling.BILINEAR , __magic_name__ = True , __magic_name__ = 1 / 2_5_5 , __magic_name__ = True , __magic_name__ = None , __magic_name__ = None , **__magic_name__ , ): super().__init__(**__magic_name__ ) lowerCamelCase : Dict = size if size is not None else {"""shortest_edge""": 3_8_4} lowerCamelCase : Tuple = get_size_dict(__magic_name__ , default_to_square=__magic_name__ ) lowerCamelCase : Dict = do_resize lowerCamelCase : List[Any] = size # Default value set here for backwards compatibility where the value in config is None lowerCamelCase : Any = crop_pct if crop_pct is not None else 2_2_4 / 2_5_6 lowerCamelCase : Union[str, Any] = resample lowerCamelCase : str = do_rescale lowerCamelCase : Union[str, Any] = rescale_factor lowerCamelCase : Tuple = do_normalize lowerCamelCase : Optional[Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN lowerCamelCase : int = image_std if image_std is not None else IMAGENET_STANDARD_STD def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = PILImageResampling.BICUBIC , __magic_name__ = None , **__magic_name__ , ): lowerCamelCase : Union[str, Any] = 
get_size_dict(__magic_name__ , default_to_square=__magic_name__ ) if "shortest_edge" not in size: raise ValueError(F'''Size dictionary must contain \'shortest_edge\' key. Got {size.keys()}''' ) lowerCamelCase : str = size["""shortest_edge"""] if shortest_edge < 3_8_4: # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct lowerCamelCase : List[str] = int(shortest_edge / crop_pct ) lowerCamelCase : Optional[Any] = get_resize_output_image_size(__magic_name__ , size=__magic_name__ , default_to_square=__magic_name__ ) lowerCamelCase : Optional[int] = resize(image=__magic_name__ , size=__magic_name__ , resample=__magic_name__ , data_format=__magic_name__ , **__magic_name__ ) # then crop to (shortest_edge, shortest_edge) return center_crop(image=__magic_name__ , size=(shortest_edge, shortest_edge) , data_format=__magic_name__ , **__magic_name__ ) else: # warping (no cropping) when evaluated at 384 or larger return resize( __magic_name__ , size=(shortest_edge, shortest_edge) , resample=__magic_name__ , data_format=__magic_name__ , **__magic_name__ ) def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ = None , **__magic_name__ , ): return rescale(__magic_name__ , scale=__magic_name__ , data_format=__magic_name__ , **__magic_name__ ) def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = None , **__magic_name__ , ): return normalize(__magic_name__ , mean=__magic_name__ , std=__magic_name__ , data_format=__magic_name__ , **__magic_name__ ) def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = ChannelDimension.FIRST , **__magic_name__ , ): lowerCamelCase : str = do_resize if do_resize is not None else self.do_resize lowerCamelCase : 
Optional[Any] = crop_pct if crop_pct is not None else self.crop_pct lowerCamelCase : Optional[int] = resample if resample is not None else self.resample lowerCamelCase : Optional[int] = do_rescale if do_rescale is not None else self.do_rescale lowerCamelCase : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor lowerCamelCase : Tuple = do_normalize if do_normalize is not None else self.do_normalize lowerCamelCase : Optional[Any] = image_mean if image_mean is not None else self.image_mean lowerCamelCase : Tuple = image_std if image_std is not None else self.image_std lowerCamelCase : Dict = size if size is not None else self.size lowerCamelCase : str = get_size_dict(__magic_name__ , default_to_square=__magic_name__ ) lowerCamelCase : List[str] = make_list_of_images(__magic_name__ ) if not valid_images(__magic_name__ ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None or resample is None: raise ValueError("""Size and resample must be specified if do_resize is True.""" ) if do_resize and size["shortest_edge"] < 3_8_4 and crop_pct is None: raise ValueError("""crop_pct must be specified if size < 384.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # All transformations expect numpy arrays. 
lowerCamelCase : Optional[Any] = [to_numpy_array(__magic_name__ ) for image in images] if do_resize: lowerCamelCase : List[Any] = [self.resize(image=__magic_name__ , size=__magic_name__ , crop_pct=__magic_name__ , resample=__magic_name__ ) for image in images] if do_rescale: lowerCamelCase : Union[str, Any] = [self.rescale(image=__magic_name__ , scale=__magic_name__ ) for image in images] if do_normalize: lowerCamelCase : List[Any] = [self.normalize(image=__magic_name__ , mean=__magic_name__ , std=__magic_name__ ) for image in images] lowerCamelCase : Optional[int] = [to_channel_dimension_format(__magic_name__ , __magic_name__ ) for image in images] lowerCamelCase : List[str] = {"""pixel_values""": images} return BatchFeature(data=__magic_name__ , tensor_type=__magic_name__ )
287
0
'''simple docstring'''
# Unit tests for the DeBERTa slow and fast (rust) tokenizers.
#
# NOTE(review): identifiers in this file are machine-mangled -- every
# assignment target is `snake_case__` and most references use `snake_case_`,
# which is never bound at those points.  The file will not run as-is; the
# intended wiring is flagged in comments below rather than guessed at in code.
import json
import os
import unittest

from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow

from ...test_tokenization_common import TokenizerTesterMixin


class UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
    """Tokenizer test suite for DeBERTa, driven by the shared tokenizer mixin."""

    # Mixin configuration.
    # NOTE(review): all three attributes share the name `lowercase`, so only the
    # last assignment survives -- presumably these were tokenizer_class /
    # test_rust_tokenizer / rust_tokenizer_class; confirm against the mixin.
    lowercase = DebertaTokenizer
    lowercase = True
    lowercase = DebertaTokenizerFast

    def lowerCamelCase ( self : str ):
        # setUp: write a tiny BPE vocab + merges file into the temp dir so the
        # tests can instantiate tokenizers without network access.
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        snake_case__ : Optional[int] = [
            """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""",
            """\u0120""", """\u0120l""", """\u0120n""", """\u0120lo""", """\u0120low""", """er""",
            """\u0120lowest""", """\u0120newer""", """\u0120wider""", """[UNK]""",
        ]
        # token -> id mapping serialized below as the JSON vocab file.
        snake_case__ : List[str] = dict(zip(snake_case_ , range(len(snake_case_ ) ) ) )
        # BPE merge rules; first entry is the required version header line.
        snake_case__ : str = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
        snake_case__ : Any = {"""unk_token""": """[UNK]"""}

        snake_case__ : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        snake_case__ : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write(json.dumps(snake_case_ ) + """\n""" )
        with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write("""\n""".join(snake_case_ ) )

    def lowerCamelCase ( self : int , **snake_case_ : Dict ):
        # Build a slow tokenizer from the fixture dir created in setUp.
        # NOTE(review): mutates `kwargs`, which is unbound here (target was renamed).
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **snake_case_ )

    def lowerCamelCase ( self : int , snake_case_ : Optional[Any] ):
        # Provide (input_text, expected_output_text) for the mixin round-trip checks.
        snake_case__ : int = """lower newer"""
        snake_case__ : List[Any] = """lower newer"""
        return input_text, output_text

    def lowerCamelCase ( self : List[str] ):
        # Full tokenization path: text -> BPE tokens -> ids against the tiny vocab.
        snake_case__ : Union[str, Any] = self.get_tokenizer()
        snake_case__ : Union[str, Any] = """lower newer"""
        snake_case__ : Union[str, Any] = ["""l""", """o""", """w""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
        snake_case__ : Tuple = tokenizer.tokenize(snake_case_ )
        self.assertListEqual(snake_case_ , snake_case_ )

        # Unknown tokens must map to the [UNK] id (19 in the fixture vocab).
        snake_case__ : int = tokens + [tokenizer.unk_token]
        snake_case__ : Any = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case_ ) , snake_case_ )

    def lowerCamelCase ( self : Dict ):
        # token_type_ids for a text pair: 0s for the first segment, 1s for the second.
        snake_case__ : Optional[Any] = self.get_tokenizer()
        snake_case__ : Optional[int] = tokenizer("""Hello""" , """World""" )
        snake_case__ : Optional[int] = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        self.assertListEqual(tokd["""token_type_ids"""] , snake_case_ )

    @slow
    def lowerCamelCase ( self : List[str] ):
        # build_inputs_with_special_tokens must agree with encode() when special
        # tokens are enabled, for both single sequences and pairs.
        snake_case__ : Union[str, Any] = self.tokenizer_class.from_pretrained("""microsoft/deberta-base""" )

        snake_case__ : Tuple = tokenizer.encode("""sequence builders""" , add_special_tokens=snake_case_ )
        snake_case__ : int = tokenizer.encode("""multi-sequence build""" , add_special_tokens=snake_case_ )

        snake_case__ : Optional[Any] = tokenizer.encode(
            """sequence builders""" , add_special_tokens=snake_case_ , add_prefix_space=snake_case_ )
        snake_case__ : Union[str, Any] = tokenizer.encode(
            """sequence builders""" , """multi-sequence build""" , add_special_tokens=snake_case_ , add_prefix_space=snake_case_ )

        snake_case__ : str = tokenizer.build_inputs_with_special_tokens(snake_case_ )
        snake_case__ : Optional[Any] = tokenizer.build_inputs_with_special_tokens(snake_case_ , snake_case_ )

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode

    @slow
    def lowerCamelCase ( self : List[Any] ):
        # Batch-encode three sentences with the hub checkpoint (slow and, when
        # enabled, fast tokenizer) and compare the full encoding dict plus the
        # decoded round-trip against hard-coded expectations.
        snake_case__ : str = [self.tokenizer_class]
        if self.test_rust_tokenizer:
            tokenizer_classes.append(self.rust_tokenizer_class )

        for tokenizer_class in tokenizer_classes:
            snake_case__ : int = tokenizer_class.from_pretrained("""microsoft/deberta-base""" )

            snake_case__ : List[Any] = [
                """ALBERT: A Lite BERT for Self-supervised Learning of Language Representations""",
                """ALBERT incorporates two parameter reduction techniques""",
                """The first one is a factorized embedding parameterization. By decomposing the large vocabulary"""
                """ embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"""
                """ vocabulary embedding.""",
            ]

            snake_case__ : List[Any] = tokenizer(snake_case_ , padding=snake_case_ )
            snake_case__ : Any = [tokenizer.decode(snake_case_ , skip_special_tokens=snake_case_ ) for seq in encoding["""input_ids"""]]

            # Expected padded batch encoding (ids, segment ids, attention mask).
            # fmt: off
            snake_case__ : Union[str, Any] = {
                """input_ids""": [
                    [1, 2_118, 11_126, 565, 35, 83, 25_191, 163, 18_854, 13, 12_156, 12, 16_101, 25_376, 13_807, 9, 22_205, 27_893, 1_635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 2_118, 11_126, 565, 24_536, 80, 43_797, 4_878, 7_373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 133, 78, 65, 16, 10, 3_724, 1_538, 33_183, 11_303, 43_797, 1_938, 4, 870, 24_165, 29_105, 5, 739, 32_644, 33_183, 11_303, 36_173, 88, 80, 650, 7_821, 45_940, 6, 52, 2_559, 5, 1_836, 9, 5, 7_397, 13_171, 31, 5, 1_836, 9, 32_644, 33_183, 11_303, 4, 2]
                ],
                """token_type_ids""": [
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
                ],
                """attention_mask""": [
                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
                ]
            }
            # fmt: on

            # Decoding the ids (skipping special tokens) must reproduce the inputs.
            snake_case__ : Any = [
                """ALBERT: A Lite BERT for Self-supervised Learning of Language Representations""",
                """ALBERT incorporates two parameter reduction techniques""",
                """The first one is a factorized embedding parameterization. By decomposing the large vocabulary"""
                """ embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"""
                """ vocabulary embedding.""",
            ]

            self.assertDictEqual(encoding.data , snake_case_ )

            for expected, decoded in zip(snake_case_ , snake_case_ ):
                self.assertEqual(snake_case_ , snake_case_ )
35
# Tests for PretrainedConfig: hub push/pull, serialization round-trips, and
# versioned-configuration-file selection.
#
# NOTE(review): identifiers are machine-mangled -- assignment targets were
# uniformly renamed (`lowerCamelCase`, `_lowerCamelCase`, `__magic_name__`)
# while many use sites keep the original names (`config`, `c`, `tokd`,
# `configuration`, ...), so several methods reference unbound names.  The
# intended bindings are flagged in comments; the code itself is left untouched.
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path

from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError

from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test


sys.path.append(str(Path(__file__).parent.parent / """utils"""))

from test_module.custom_configuration import CustomConfig  # noqa E402


# Every common PretrainedConfig kwarg with a deliberately NON-default value;
# the completeness test below cross-checks this table against
# PretrainedConfig.__dict__.  (Name was presumably `config_common_kwargs`.)
_lowerCamelCase ={
    """return_dict""": False,
    """output_hidden_states""": True,
    """output_attentions""": True,
    """torchscript""": True,
    """torch_dtype""": """float16""",
    """use_bfloat16""": True,
    """tf_legacy_loss""": True,
    """pruned_heads""": {"""a""": 1},
    """tie_word_embeddings""": False,
    """is_decoder""": True,
    """cross_attention_hidden_size""": 1_2_8,
    """add_cross_attention""": True,
    """tie_encoder_decoder""": True,
    """max_length""": 5_0,
    """min_length""": 3,
    """do_sample""": True,
    """early_stopping""": True,
    """num_beams""": 3,
    """num_beam_groups""": 3,
    """diversity_penalty""": 0.5,
    """temperature""": 2.0,
    """top_k""": 1_0,
    """top_p""": 0.7,
    """typical_p""": 0.2,
    """repetition_penalty""": 0.8,
    """length_penalty""": 0.8,
    """no_repeat_ngram_size""": 5,
    """encoder_no_repeat_ngram_size""": 5,
    """bad_words_ids""": [1, 2, 3],
    """num_return_sequences""": 3,
    """chunk_size_feed_forward""": 5,
    """output_scores""": True,
    """return_dict_in_generate""": True,
    """forced_bos_token_id""": 2,
    """forced_eos_token_id""": 3,
    """remove_invalid_values""": True,
    """architectures""": ["""BertModel"""],
    """finetuning_task""": """translation""",
    """id2label""": {0: """label"""},
    """label2id""": {"""label""": """0"""},
    """tokenizer_class""": """BertTokenizerFast""",
    """prefix""": """prefix""",
    """bos_token_id""": 6,
    """pad_token_id""": 7,
    """eos_token_id""": 8,
    """sep_token_id""": 9,
    """decoder_start_token_id""": 1_0,
    """exponential_decay_length_penalty""": (5, 1.01),
    """suppress_tokens""": [0, 1],
    """begin_suppress_tokens""": 2,
    """task_specific_params""": {"""translation""": """some_params"""},
    """problem_type""": """regression""",
}


@is_staging_test
class A__ ( unittest.TestCase):
    # Hub round-trip tests; run only against the staging endpoint.

    @classmethod
    def UpperCamelCase__ ( cls ):
        # setUpClass: log in with the staging test token so pushes authenticate.
        # NOTE(review): the target was presumably `cls._token` (read below).
        lowerCamelCase : int = TOKEN
        HfFolder.save_token(__magic_name__ )

    @classmethod
    def UpperCamelCase__ ( cls ):
        # tearDownClass: best-effort cleanup of the repos created by the tests.
        try:
            delete_repo(token=cls._token , repo_id="""test-config""" )
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token , repo_id="""valid_org/test-config-org""" )
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token , repo_id="""test-dynamic-config""" )
        except HTTPError:
            pass

    def UpperCamelCase__ ( self ):
        # Push a config under the user namespace (directly, then via
        # save_pretrained(push_to_hub=True)) and verify the reload matches.
        lowerCamelCase : List[Any] = BertConfig(
            vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 )
        config.push_to_hub("""test-config""" , use_auth_token=self._token )

        lowerCamelCase : Any = BertConfig.from_pretrained(F'''{USER}/test-config''' )
        for k, v in config.to_dict().items():
            # transformers_version differs between save and load environments.
            if k != "transformers_version":
                self.assertEqual(__magic_name__ , getattr(__magic_name__ , __magic_name__ ) )

        # Reset repo
        delete_repo(token=self._token , repo_id="""test-config""" )

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(__magic_name__ , repo_id="""test-config""" , push_to_hub=__magic_name__ , use_auth_token=self._token )

            lowerCamelCase : Optional[Any] = BertConfig.from_pretrained(F'''{USER}/test-config''' )
            for k, v in config.to_dict().items():
                if k != "transformers_version":
                    self.assertEqual(__magic_name__ , getattr(__magic_name__ , __magic_name__ ) )

    def UpperCamelCase__ ( self ):
        # Same round-trip as above, but under an organization namespace.
        lowerCamelCase : Dict = BertConfig(
            vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 )
        config.push_to_hub("""valid_org/test-config-org""" , use_auth_token=self._token )

        lowerCamelCase : Optional[int] = BertConfig.from_pretrained("""valid_org/test-config-org""" )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(__magic_name__ , getattr(__magic_name__ , __magic_name__ ) )

        # Reset repo
        delete_repo(token=self._token , repo_id="""valid_org/test-config-org""" )

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                __magic_name__ , repo_id="""valid_org/test-config-org""" , push_to_hub=__magic_name__ , use_auth_token=self._token )

            lowerCamelCase : List[str] = BertConfig.from_pretrained("""valid_org/test-config-org""" )
            for k, v in config.to_dict().items():
                if k != "transformers_version":
                    self.assertEqual(__magic_name__ , getattr(__magic_name__ , __magic_name__ ) )

    def UpperCamelCase__ ( self ):
        # Push a custom (dynamic) config class and reload it with
        # trust_remote_code; the auto_map entry must be written automatically.
        CustomConfig.register_for_auto_class()
        lowerCamelCase : Optional[Any] = CustomConfig(attribute=4_2 )

        config.push_to_hub("""test-dynamic-config""" , use_auth_token=self._token )

        # This has added the proper auto_map field to the config
        self.assertDictEqual(config.auto_map , {"""AutoConfig""": """custom_configuration.CustomConfig"""} )

        lowerCamelCase : List[str] = AutoConfig.from_pretrained(F'''{USER}/test-dynamic-config''' , trust_remote_code=__magic_name__ )
        # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
        self.assertEqual(new_config.__class__.__name__ , """CustomConfig""" )
        self.assertEqual(new_config.attribute , 4_2 )


class A__ ( unittest.TestCase):
    # Unit tests for PretrainedConfig helpers (no hub access except mocks
    # and tiny internal-testing repos).

    def UpperCamelCase__ ( self ):
        # update_from_string must parse and apply int/float/bool/str overrides.
        # NOTE(review): target was presumably `c` (read on every line below).
        lowerCamelCase : str = GPTaConfig()

        # attempt to modify each of int/float/bool/str config records and verify they were updated
        lowerCamelCase : Optional[int] = c.n_embd + 1  # int
        lowerCamelCase : Optional[int] = c.resid_pdrop + 1.0  # float
        lowerCamelCase : Tuple = not c.scale_attn_weights  # bool
        lowerCamelCase : Any = c.summary_type + """foo"""  # str
        c.update_from_string(
            F'''n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}''' )
        self.assertEqual(__magic_name__ , c.n_embd , """mismatch for key: n_embd""" )
        self.assertEqual(__magic_name__ , c.resid_pdrop , """mismatch for key: resid_pdrop""" )
        self.assertEqual(__magic_name__ , c.scale_attn_weights , """mismatch for key: scale_attn_weights""" )
        self.assertEqual(__magic_name__ , c.summary_type , """mismatch for key: summary_type""" )

    def UpperCamelCase__ ( self ):
        # The non-default-value table at module top must cover every common
        # PretrainedConfig attribute (minus the four exempt keys).
        lowerCamelCase : str = PretrainedConfig()
        lowerCamelCase : int = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to addin config_common_kwargs above.
        self.assertListEqual(
            __magic_name__ , ["""is_encoder_decoder""", """_name_or_path""", """_commit_hash""", """transformers_version"""] )
        # Entries that accidentally equal the default would make the test vacuous.
        lowerCamelCase : List[str] = [key for key, value in config_common_kwargs.items() if value == getattr(__magic_name__ , __magic_name__ )]
        if len(__magic_name__ ) > 0:
            raise ValueError(
                """The following keys are set with the default values in"""
                """ `test_configuration_common.config_common_kwargs` pick another value for them:"""
                F''' {", ".join(__magic_name__ )}.''' )

    def UpperCamelCase__ ( self ):
        # Loading a config stored in a repo subfolder requires `subfolder=`.
        with self.assertRaises(__magic_name__ ):
            # config is in subfolder, the following should not work without specifying the subfolder
            lowerCamelCase : Dict = BertConfig.from_pretrained("""hf-internal-testing/tiny-random-bert-subfolder""" )

        lowerCamelCase : str = BertConfig.from_pretrained("""hf-internal-testing/tiny-random-bert-subfolder""" , subfolder="""bert""" )

        self.assertIsNotNone(__magic_name__ )

    def UpperCamelCase__ ( self ):
        # With the file already cached, a server error (mocked 500 on HEAD)
        # must fall back to the cache instead of failing.
        # A mock response for an HTTP head request to emulate server down
        lowerCamelCase : Dict = mock.Mock()
        lowerCamelCase : Optional[int] = 5_0_0
        lowerCamelCase : List[Any] = {}
        lowerCamelCase : Tuple = HTTPError
        lowerCamelCase : Union[str, Any] = {}

        # Download this model to make sure it's in the cache.
        lowerCamelCase : List[str] = BertConfig.from_pretrained("""hf-internal-testing/tiny-random-bert""" )

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("""requests.Session.request""" , return_value=__magic_name__ ) as mock_head:
            lowerCamelCase : Any = BertConfig.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
            # This check we did call the fake head request
            mock_head.assert_called()

    def UpperCamelCase__ ( self ):
        # This test is for deprecated behavior and can be removed in v5
        lowerCamelCase : List[str] = BertConfig.from_pretrained(
            """https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json""" )

    def UpperCamelCase__ ( self ):
        # Versioned config files: `config.<version>.json` is preferred over
        # `config.json` when the running transformers version is >= <version>.
        lowerCamelCase : Optional[int] = AutoConfig.from_pretrained("""bert-base-cased""" )
        lowerCamelCase : Optional[Any] = ["""config.4.0.0.json"""]
        with tempfile.TemporaryDirectory() as tmp_dir:
            configuration.save_pretrained(__magic_name__ )
            lowerCamelCase : str = 2
            json.dump(configuration.to_dict() , open(os.path.join(__magic_name__ , """config.4.0.0.json""" ) , """w""" ) )

            # This should pick the new configuration file as the version of Transformers is > 4.0.0
            lowerCamelCase : Union[str, Any] = AutoConfig.from_pretrained(__magic_name__ )
            self.assertEqual(new_configuration.hidden_size , 2 )

            # Will need to be adjusted if we reach v42 and this test is still here.
            # Should pick the old configuration file as the version of Transformers is < 4.42.0
            lowerCamelCase : Any = ["""config.42.0.0.json"""]
            lowerCamelCase : Optional[Any] = 7_6_8
            configuration.save_pretrained(__magic_name__ )
            shutil.move(os.path.join(__magic_name__ , """config.4.0.0.json""" ) , os.path.join(__magic_name__ , """config.42.0.0.json""" ) )
            lowerCamelCase : int = AutoConfig.from_pretrained(__magic_name__ )
            self.assertEqual(new_configuration.hidden_size , 7_6_8 )

    def UpperCamelCase__ ( self ):
        # This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
        lowerCamelCase : str = """hf-internal-testing/test-two-configs"""

        import transformers as new_transformers

        # Pretend the running version is v4.0.0 -> new config file is used.
        lowerCamelCase : Tuple = """v4.0.0"""
        lowerCamelCase , lowerCamelCase : Optional[int] = new_transformers.models.auto.AutoConfig.from_pretrained(
            __magic_name__ , return_unused_kwargs=__magic_name__ )
        self.assertEqual(new_configuration.hidden_size , 2 )
        # This checks `_configuration_file` ia not kept in the kwargs by mistake.
        self.assertDictEqual(__magic_name__ , {} )

        # Testing an older version by monkey-patching the version in the module it's used.
        import transformers as old_transformers

        lowerCamelCase : Tuple = """v3.0.0"""
        lowerCamelCase : Any = old_transformers.models.auto.AutoConfig.from_pretrained(__magic_name__ )
        self.assertEqual(old_configuration.hidden_size , 7_6_8 )
287
0
# Slow integration test for the Flax Stable Diffusion 2 inpainting pipeline:
# runs a real sharded (pmap) generation and checks a pixel slice of the output.
#
# NOTE(review): identifiers are machine-mangled -- every assignment target is
# `SCREAMING_SNAKE_CASE` and call arguments are `lowerCamelCase__`, so the
# references below (`pipeline`, `num_samples`, `output`, ...) are unbound.
# The intended bindings are flagged in comments; the code is left untouched.
import gc
import unittest

from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax


if is_flax_available():
    import jax
    import jax.numpy as jnp
    from flax.jax_utils import replicate
    from flax.training.common_utils import shard


@slow
@require_flax
class UpperCamelCase__ ( unittest.TestCase ):
    '''simple docstring'''

    def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Dict:
        '''simple docstring'''
        # Release accelerator memory between tests.
        super().tearDown()
        gc.collect()

    def SCREAMING_SNAKE_CASE__ ( self : str ) -> Optional[int]:
        '''simple docstring'''
        # Fixtures: init image + mask hosted on the internal-testing dataset.
        SCREAMING_SNAKE_CASE = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/sd2-inpaint/init_image.png""" )
        SCREAMING_SNAKE_CASE = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )

        SCREAMING_SNAKE_CASE = """xvjiarui/stable-diffusion-2-inpainting"""
        SCREAMING_SNAKE_CASE = FlaxStableDiffusionInpaintPipeline.from_pretrained(lowerCamelCase__ ,safety_checker=lowerCamelCase__ )

        SCREAMING_SNAKE_CASE = """Face of a yellow cat, high resolution, sitting on a park bench"""

        # Deterministic seed; one sample per local device.
        SCREAMING_SNAKE_CASE = jax.random.PRNGKey(0 )
        SCREAMING_SNAKE_CASE = 50

        SCREAMING_SNAKE_CASE = jax.device_count()
        SCREAMING_SNAKE_CASE = num_samples * [prompt]
        SCREAMING_SNAKE_CASE = num_samples * [init_image]
        SCREAMING_SNAKE_CASE = num_samples * [mask_image]
        SCREAMING_SNAKE_CASE = pipeline.prepare_inputs(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ )

        # shard inputs and rng
        SCREAMING_SNAKE_CASE = replicate(lowerCamelCase__ )
        SCREAMING_SNAKE_CASE = jax.random.split(lowerCamelCase__ ,jax.device_count() )
        SCREAMING_SNAKE_CASE = shard(lowerCamelCase__ )
        SCREAMING_SNAKE_CASE = shard(lowerCamelCase__ )
        SCREAMING_SNAKE_CASE = shard(lowerCamelCase__ )

        SCREAMING_SNAKE_CASE = pipeline(
            lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,jit=lowerCamelCase__ )

        # Flatten the per-device axis back into a batch of 512x512 RGB images
        # and compare a small slice against reference values.
        SCREAMING_SNAKE_CASE = output.images.reshape(lowerCamelCase__ ,512 ,512 ,3 )

        SCREAMING_SNAKE_CASE = images[0, 253:256, 253:256, -1]

        SCREAMING_SNAKE_CASE = jnp.asarray(jax.device_get(image_slice.flatten() ) )
        SCREAMING_SNAKE_CASE = jnp.array(
            [0.3611307, 0.37649736, 0.3757408, 0.38213953, 0.39295167, 0.3841631, 0.41554978, 0.4137475, 0.4217084] )
        print(F"""output_slice: {output_slice}""" )
        assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
296
from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow if is_tf_available(): import numpy as np import tensorflow as tf from transformers import TFCamembertModel @require_tf @require_sentencepiece @require_tokenizers class A__ ( unittest.TestCase): @slow def UpperCamelCase__ ( self ): lowerCamelCase : Union[str, Any] = TFCamembertModel.from_pretrained("""jplu/tf-camembert-base""" ) lowerCamelCase : Any = tf.convert_to_tensor( [[5, 1_2_1, 1_1, 6_6_0, 1_6, 7_3_0, 2_5_5_4_3, 1_1_0, 8_3, 6]] , dtype=tf.intaa , ) # J'aime le camembert !" lowerCamelCase : str = model(__magic_name__ )["""last_hidden_state"""] lowerCamelCase : Union[str, Any] = tf.TensorShape((1, 1_0, 7_6_8) ) self.assertEqual(output.shape , __magic_name__ ) # compare the actual values for a slice. lowerCamelCase : Dict = tf.convert_to_tensor( [[[-0.0_254, 0.0_235, 0.1_027], [0.0_606, -0.1_811, -0.0_418], [-0.1_561, -0.1_127, 0.2_687]]] , dtype=tf.floataa , ) # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0') # camembert.eval() # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach() self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
287
0
'''simple docstring'''
# Fixtures for doc-sample testing (Italian locale): the notebook cell that is
# prepended to generated code samples, and placeholder -> fake-class
# substitutions applied to documentation snippets.
#
# NOTE(review): all three targets were renamed to `_UpperCamelCase` (each
# assignment shadows the previous one), yet the second literal references
# `INSTALL_CONTENT`, which is never bound here -- the original names were
# presumably INSTALL_CONTENT / notebook_first_cells / black_avoid_patterns.

# Shell/pip preamble injected as the first notebook cell.
_UpperCamelCase : str = '\n# Installazione di Transformers\n! pip install transformers datasets\n# Per installare dalla fonte invece dell\'ultima versione rilasciata, commenta il comando sopra e\n# rimuovi la modalità commento al comando seguente.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'

# Cells prepended to every generated notebook.
_UpperCamelCase : int = [{'type': 'code', 'content': INSTALL_CONTENT}]

# Doc placeholders replaced by fake classes so snippets stay importable.
_UpperCamelCase : Optional[Any] = {
    '{processor_class}': 'FakeProcessorClass',
    '{model_class}': 'FakeModelClass',
    '{object_class}': 'FakeObjectClass',
}
304
# Archive/compression extraction utilities for `datasets`.
#
# Fixed (vs. the previous revision): nonexistent modules `bza`, `pyazr` and
# `lza.frame` replaced with the intended `bz2`, `py7zr` and `lz4.frame`
# (each extractor's own "Please pip install ..." message and magic numbers
# identify the real library); class/method names restored to the names the
# file itself references (`TarExtractor`, `MagicNumberBaseExtractor`,
# `Extractor.infer_extractor_format`, ...) -- previously every class was
# named `A__` and every method `UpperCamelCase__`, so definitions shadowed
# each other and the `extractors` registry referenced undefined names.
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union

from .. import config
from .filelock import FileLock
from .logging import get_logger


logger = get_logger(__name__)


class ExtractManager:
    """Resolve an archive's format and extract it into a shared cache directory."""

    def __init__(self, cache_dir: Optional[str] = None):
        # Extractions land under <cache_dir>/extracted, or the global default path.
        self.extract_dir = (
            os.path.join(cache_dir, config.EXTRACTED_DATASETS_DIR) if cache_dir else config.EXTRACTED_DATASETS_PATH
        )
        self.extractor = Extractor

    def _get_output_path(self, path: str) -> str:
        """Return the cache location for *path*, keyed by a hash of its absolute path."""
        from .file_utils import hash_url_to_filename

        # Path where we extract compressed archives
        # We extract in the cache dir, and get the extracted path name by hashing the original path"
        abs_path = os.path.abspath(path)
        return os.path.join(self.extract_dir, hash_url_to_filename(abs_path))

    def _do_extract(self, output_path: str, force_extract: bool) -> bool:
        """Extract only when no non-empty result already exists (or when forced)."""
        return force_extract or (
            not os.path.isfile(output_path) and not (os.path.isdir(output_path) and os.listdir(output_path))
        )

    def extract(self, input_path: str, force_extract: bool = False) -> str:
        """Extract *input_path* if it is a recognized archive; else return it unchanged."""
        extractor_format = self.extractor.infer_extractor_format(input_path)
        if not extractor_format:
            return input_path
        output_path = self._get_output_path(input_path)
        if self._do_extract(output_path, force_extract):
            self.extractor.extract(input_path, output_path, extractor_format)
        return output_path


class BaseExtractor(ABC):
    """Interface: format detection + extraction for one archive format."""

    @classmethod
    @abstractmethod
    def is_extractable(cls, path, **kwargs) -> bool:
        ...

    @staticmethod
    @abstractmethod
    def extract(input_path, output_path):
        ...


class MagicNumberBaseExtractor(BaseExtractor, ABC):
    """Base for formats identified by a leading magic-number byte prefix."""

    # Candidate magic-number prefixes; overridden by subclasses.
    magic_numbers: List[bytes] = []

    @staticmethod
    def read_magic_number(path, magic_number_length):
        with open(path, "rb") as f:
            return f.read(magic_number_length)

    @classmethod
    def is_extractable(cls, path, magic_number=b"") -> bool:
        # Callers may pass a pre-read prefix to avoid re-opening the file.
        if not magic_number:
            magic_number_length = max(len(cls_magic_number) for cls_magic_number in cls.magic_numbers)
            try:
                magic_number = cls.read_magic_number(path, magic_number_length)
            except OSError:
                return False
        return any(magic_number.startswith(cls_magic_number) for cls_magic_number in cls.magic_numbers)


class TarExtractor(BaseExtractor):
    @classmethod
    def is_extractable(cls, path, **kwargs) -> bool:
        return tarfile.is_tarfile(path)

    @staticmethod
    def safemembers(members, output_path):
        """Yield only tar members that stay inside *output_path*.

        Blocks absolute/`..` paths and symlinks/hardlinks escaping the target
        directory (tar path-traversal attacks); blocked entries are logged.
        """

        def resolved(path) -> str:
            return os.path.realpath(os.path.abspath(path))

        def badpath(path, base) -> bool:
            # joinpath will ignore base if path is absolute
            return not resolved(os.path.join(base, path)).startswith(base)

        def badlink(info, base) -> bool:
            # Links are interpreted relative to the directory containing the link
            tip = resolved(os.path.join(base, os.path.dirname(info.name)))
            return badpath(info.linkname, base=tip)

        base = resolved(output_path)
        for finfo in members:
            if badpath(finfo.name, base):
                logger.error(f"Extraction of {finfo.name} is blocked (illegal path)")
            elif finfo.issym() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}")
            elif finfo.islnk() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}")
            else:
                yield finfo

    @staticmethod
    def extract(input_path, output_path):
        os.makedirs(output_path, exist_ok=True)
        tar_file = tarfile.open(input_path)
        tar_file.extractall(output_path, members=TarExtractor.safemembers(tar_file, output_path))
        tar_file.close()


class GzipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x1F\x8B"]

    @staticmethod
    def extract(input_path, output_path):
        with gzip.open(input_path, "rb") as gzip_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(gzip_file, extracted_file)


class ZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [
        b"PK\x03\x04",
        b"PK\x05\x06",  # empty archive
        b"PK\x07\x08",  # spanned archive
    ]

    @classmethod
    def is_extractable(cls, path, magic_number=b"") -> bool:
        if super().is_extractable(path, magic_number=magic_number):
            return True
        try:
            # Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
            # From: https://github.com/python/cpython/pull/5053
            from zipfile import (
                _CD_SIGNATURE,
                _ECD_DISK_NUMBER,
                _ECD_DISK_START,
                _ECD_ENTRIES_TOTAL,
                _ECD_OFFSET,
                _ECD_SIZE,
                _EndRecData,
                sizeCentralDir,
                stringCentralDir,
                structCentralDir,
            )

            with open(path, "rb") as fp:
                endrec = _EndRecData(fp)
                if endrec:
                    if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
                        return True  # Empty zipfiles are still zipfiles
                    elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
                        fp.seek(endrec[_ECD_OFFSET])  # Central directory is on the same disk
                        if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
                            data = fp.read(sizeCentralDir)  # CD is where we expect it to be
                            if len(data) == sizeCentralDir:
                                centdir = struct.unpack(structCentralDir, data)  # CD is the right size
                                if centdir[_CD_SIGNATURE] == stringCentralDir:
                                    return True  # First central directory entry has correct magic number
            return False
        except Exception:  # catch all errors in case future python versions change the zipfile internals
            return False

    @staticmethod
    def extract(input_path, output_path):
        os.makedirs(output_path, exist_ok=True)
        with zipfile.ZipFile(input_path, "r") as zip_file:
            zip_file.extractall(output_path)
            zip_file.close()


class XzExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\xFD\x37\x7A\x58\x5A\x00"]

    @staticmethod
    def extract(input_path, output_path):
        with lzma.open(input_path) as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)


class RarExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"Rar!\x1a\x07\x00", b"Rar!\x1a\x07\x01\x00"]  # RAR_ID  # RAR5_ID

    @staticmethod
    def extract(input_path, output_path):
        if not config.RARFILE_AVAILABLE:
            raise ImportError("Please pip install rarfile")
        import rarfile

        os.makedirs(output_path, exist_ok=True)
        rf = rarfile.RarFile(input_path)
        rf.extractall(output_path)
        rf.close()


class ZstdExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x28\xb5\x2F\xFD"]

    @staticmethod
    def extract(input_path, output_path):
        if not config.ZSTANDARD_AVAILABLE:
            raise ImportError("Please pip install zstandard")
        import zstandard as zstd

        dctx = zstd.ZstdDecompressor()
        with open(input_path, "rb") as ifh, open(output_path, "wb") as ofh:
            dctx.copy_stream(ifh, ofh)


class BzipaExtractor(MagicNumberBaseExtractor):
    # b"BZh" -- bzip2 stream header.
    magic_numbers = [b"\x42\x5A\x68"]

    @staticmethod
    def extract(input_path, output_path):
        with bz2.open(input_path, "rb") as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)


class SevenZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x37\x7A\xBC\xAF\x27\x1C"]

    @staticmethod
    def extract(input_path, output_path):
        if not config.PY7ZR_AVAILABLE:
            raise ImportError("Please pip install py7zr")
        import py7zr

        os.makedirs(output_path, exist_ok=True)
        with py7zr.SevenZipFile(input_path, "r") as archive:
            archive.extractall(output_path)


class LzaExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x04\x22\x4D\x18"]

    @staticmethod
    def extract(input_path, output_path):
        if not config.LZ4_AVAILABLE:
            raise ImportError("Please pip install lz4")
        import lz4.frame

        with lz4.frame.open(input_path, "rb") as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)


class Extractor:
    """Registry facade: infer an archive's format and dispatch extraction."""

    # Put zip file to the last, b/c it is possible wrongly detected as zip (I guess it means: as tar or gzip)
    extractors: Dict[str, Type[BaseExtractor]] = {
        "tar": TarExtractor,
        "gzip": GzipExtractor,
        "zip": ZipExtractor,
        "xz": XzExtractor,
        "rar": RarExtractor,
        "zstd": ZstdExtractor,
        "bz2": BzipaExtractor,
        "7z": SevenZipExtractor,  # <Added version="2.4.0"/>
        "lz4": LzaExtractor,  # <Added version="2.4.0"/>
    }

    @classmethod
    def _get_magic_number_max_length(cls):
        # Longest prefix any registered magic-number extractor needs to see.
        return max(
            len(extractor_magic_number)
            for extractor in cls.extractors.values()
            if issubclass(extractor, MagicNumberBaseExtractor)
            for extractor_magic_number in extractor.magic_numbers
        )

    @staticmethod
    def _read_magic_number(path, magic_number_length):
        try:
            return MagicNumberBaseExtractor.read_magic_number(path, magic_number_length=magic_number_length)
        except OSError:
            return b""

    @classmethod
    def is_extractable(cls, path, return_extractor=False):
        """Deprecated; use :meth:`infer_extractor_format` instead."""
        warnings.warn(
            "Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
            "Use 'infer_extractor_format' instead.",
            category=FutureWarning,
        )
        extractor_format = cls.infer_extractor_format(path)
        if extractor_format:
            return True if not return_extractor else (True, cls.extractors[extractor_format])
        return False if not return_extractor else (False, None)

    @classmethod
    def infer_extractor_format(cls, path):  # <Added version="2.4.0"/>
        """Return the registry key of the first extractor claiming *path*, else None."""
        magic_number_max_length = cls._get_magic_number_max_length()
        magic_number = cls._read_magic_number(path, magic_number_max_length)
        for extractor_format, extractor in cls.extractors.items():
            if extractor.is_extractable(path, magic_number=magic_number):
                return extractor_format

    @classmethod
    def extract(
        cls,
        input_path,
        output_path,
        extractor_format: Optional[str] = None,
        extractor: Optional[BaseExtractor] = "deprecated",
    ):
        """Extract *input_path* to *output_path* with the given format's extractor.

        Takes a file lock to prevent parallel extractions of the same target and
        wipes any stale output first.  The ``extractor`` parameter is deprecated
        in favor of ``extractor_format``; omitting both falls back to probing
        every registered extractor (also deprecated).
        """
        os.makedirs(os.path.dirname(output_path), exist_ok=True)
        # Prevent parallel extractions
        lock_path = str(Path(output_path).with_suffix(".lock"))
        with FileLock(lock_path):
            shutil.rmtree(output_path, ignore_errors=True)
            if extractor_format or extractor != "deprecated":
                if extractor != "deprecated" or not isinstance(extractor_format, str):  # passed as positional arg
                    warnings.warn(
                        "Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
                        "Use 'extractor_format' instead.",
                        category=FutureWarning,
                    )
                    extractor = extractor if extractor != "deprecated" else extractor_format
                else:
                    extractor = cls.extractors[extractor_format]
                return extractor.extract(input_path, output_path)
            else:
                warnings.warn(
                    "Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an "
                    "exception in 3.0.0.",
                    category=FutureWarning,
                )
                for extractor in cls.extractors.values():
                    if extractor.is_extractable(input_path):
                        return extractor.extract(input_path, output_path)
287
0
'''Tests for AlignProcessor (BertTokenizer + EfficientNetImageProcessor).'''
import json
import os
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import AlignProcessor, EfficientNetImageProcessor


@require_vision
class UpperCAmelCase_ (unittest.TestCase ):
    """Processor round-trip / feature-equivalence tests for ALIGN.

    NOTE(review): this class appears machine-mangled.  Throughout, assignment
    targets read `__lowerCamelCase` while later statements read names that are
    never visibly bound (`self.tmpdirname`, `self.vocab_file`, `vocab_tokens`,
    `processor_slow`, ...), several call arguments were replaced by the
    undefined name `SCREAMING_SNAKE_CASE_`, every method is named `lowercase_`
    (so later defs shadow earlier ones), and `np.uinta` is presumably
    `np.uint8`.  Restore the intended identifiers from the upstream test file
    before relying on these tests — TODO confirm against
    tests/models/align/test_processor_align.py.
    """

    def lowercase_ ( self ) -> int:
        # Set-up: write a tiny WordPiece vocab and an image-processor config
        # into a fresh temporary directory.
        __lowerCamelCase : Tuple = tempfile.mkdtemp()
        __lowerCamelCase : Tuple = [
            """[UNK]""",
            """[CLS]""",
            """[SEP]""",
            """[PAD]""",
            """[MASK]""",
            """want""",
            """##want""",
            """##ed""",
            """wa""",
            """un""",
            """runn""",
            """##ing""",
            """,""",
            """low""",
            """lowest""",
        ]
        __lowerCamelCase : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
        __lowerCamelCase : Any = {
            """do_resize""": True,
            """size""": 20,
            """do_center_crop""": True,
            """crop_size""": 18,
            """do_normalize""": True,
            """image_mean""": [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3],
            """image_std""": [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1],
        }
        # presumably the join target is IMAGE_PROCESSOR_NAME — TODO confirm
        __lowerCamelCase : int = os.path.join(self.tmpdirname , SCREAMING_SNAKE_CASE_ )
        with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
            json.dump(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )

    def lowercase_ ( self , **SCREAMING_SNAKE_CASE_ ) -> int:
        # Slow tokenizer re-loaded from the temp dir written in set-up.
        return BertTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ )

    def lowercase_ ( self , **SCREAMING_SNAKE_CASE_ ) -> List[Any]:
        # Fast (Rust-backed) tokenizer from the same temp dir.
        return BertTokenizerFast.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ )

    def lowercase_ ( self , **SCREAMING_SNAKE_CASE_ ) -> Dict:
        # Image processor re-loaded from the temp dir.
        return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ )

    def lowercase_ ( self ) -> Tuple:
        # Tear-down: remove the temp dir created in set-up.
        shutil.rmtree(self.tmpdirname )

    def lowercase_ ( self ) -> Tuple:
        # One random 3x30x400 CHW uint8 image, converted to a PIL image.
        __lowerCamelCase : Any = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
        __lowerCamelCase : Any = [Image.fromarray(np.moveaxis(SCREAMING_SNAKE_CASE_ , 0 , -1 ) ) for x in image_inputs]
        return image_inputs

    def lowercase_ ( self ) -> Dict:
        # save_pretrained / from_pretrained round-trip for slow and fast processors.
        __lowerCamelCase : Dict = self.get_tokenizer()
        __lowerCamelCase : List[Any] = self.get_rust_tokenizer()
        __lowerCamelCase : Tuple = self.get_image_processor()
        __lowerCamelCase : List[Any] = AlignProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
        processor_slow.save_pretrained(self.tmpdirname )
        __lowerCamelCase : List[str] = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=SCREAMING_SNAKE_CASE_ )
        __lowerCamelCase : List[str] = AlignProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
        processor_fast.save_pretrained(self.tmpdirname )
        __lowerCamelCase : int = AlignProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
        self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertIsInstance(processor_slow.tokenizer , SCREAMING_SNAKE_CASE_ )
        self.assertIsInstance(processor_fast.tokenizer , SCREAMING_SNAKE_CASE_ )
        self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertIsInstance(processor_slow.image_processor , SCREAMING_SNAKE_CASE_ )
        self.assertIsInstance(processor_fast.image_processor , SCREAMING_SNAKE_CASE_ )

    def lowercase_ ( self ) -> List[str]:
        # from_pretrained with extra kwargs should propagate to components.
        __lowerCamelCase : Any = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        __lowerCamelCase : Optional[Any] = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
        __lowerCamelCase : List[str] = self.get_image_processor(do_normalize=SCREAMING_SNAKE_CASE_ , padding_value=1.0 )
        __lowerCamelCase : List[Any] = AlignProcessor.from_pretrained(
            self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=SCREAMING_SNAKE_CASE_ , padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , SCREAMING_SNAKE_CASE_ )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , SCREAMING_SNAKE_CASE_ )

    def lowercase_ ( self ) -> List[Any]:
        # Processor(images=...) must match the bare image processor output.
        __lowerCamelCase : int = self.get_image_processor()
        __lowerCamelCase : Union[str, Any] = self.get_tokenizer()
        __lowerCamelCase : Optional[Any] = AlignProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
        __lowerCamelCase : str = self.prepare_image_inputs()
        __lowerCamelCase : Tuple = image_processor(SCREAMING_SNAKE_CASE_ , return_tensors='np' )
        __lowerCamelCase : Dict = processor(images=SCREAMING_SNAKE_CASE_ , return_tensors='np' )
        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )

    def lowercase_ ( self ) -> str:
        # Processor(text=...) must match the bare tokenizer output
        # (tokenizer is padded to max_length=64, as the processor does).
        __lowerCamelCase : int = self.get_image_processor()
        __lowerCamelCase : Any = self.get_tokenizer()
        __lowerCamelCase : Union[str, Any] = AlignProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
        __lowerCamelCase : List[str] = """lower newer"""
        __lowerCamelCase : List[str] = processor(text=SCREAMING_SNAKE_CASE_ )
        __lowerCamelCase : List[str] = tokenizer(SCREAMING_SNAKE_CASE_ , padding='max_length' , max_length=64 )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )

    def lowercase_ ( self ) -> Union[str, Any]:
        # Text+image call yields the combined key set; empty call must raise.
        __lowerCamelCase : List[Any] = self.get_image_processor()
        __lowerCamelCase : Dict = self.get_tokenizer()
        __lowerCamelCase : str = AlignProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
        __lowerCamelCase : int = """lower newer"""
        __lowerCamelCase : Optional[int] = self.prepare_image_inputs()
        __lowerCamelCase : Dict = processor(text=SCREAMING_SNAKE_CASE_ , images=SCREAMING_SNAKE_CASE_ )
        self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values'] )
        # test if it raises when no input is passed
        with pytest.raises(SCREAMING_SNAKE_CASE_ ):
            processor()

    def lowercase_ ( self ) -> Optional[Any]:
        # batch_decode is forwarded straight to the tokenizer.
        __lowerCamelCase : List[Any] = self.get_image_processor()
        __lowerCamelCase : Optional[int] = self.get_tokenizer()
        __lowerCamelCase : Dict = AlignProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
        __lowerCamelCase : Optional[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        __lowerCamelCase : List[Any] = processor.batch_decode(SCREAMING_SNAKE_CASE_ )
        __lowerCamelCase : List[str] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ )
        self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )

    def lowercase_ ( self ) -> Optional[int]:
        # Output keys must match the processor's declared model_input_names.
        __lowerCamelCase : Tuple = self.get_image_processor()
        __lowerCamelCase : int = self.get_tokenizer()
        __lowerCamelCase : Optional[int] = AlignProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
        __lowerCamelCase : Dict = """lower newer"""
        __lowerCamelCase : int = self.prepare_image_inputs()
        __lowerCamelCase : int = processor(text=SCREAMING_SNAKE_CASE_ , images=SCREAMING_SNAKE_CASE_ )
        self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
185
# Speed benchmark for `datasets` map/filter over a generated 500k-example set.
#
# NOTE(review): this file appears machine-mangled.  The module constants are
# all assigned to `_lowerCamelCase` yet later code reads RESULTS_BASEPATH /
# RESULTS_FILENAME / RESULTS_FILE_PATH / SPEED_TEST_N_EXAMPLES; both decorated
# helpers declare `( lowerCamelCase, **lowerCamelCase )` — a duplicate
# parameter name, which is a SyntaxError — and, because both are named `_a`,
# the later calls to `map(...)`/`filter(...)` would resolve to the *builtins*,
# not the benchmark helpers.  The final `benchmark_map_filter()` call also
# targets a name that no longer exists.  Restore the upstream identifiers
# before running — TODO confirm against datasets/benchmarks/benchmark_map_filter.py.
import json
import os
import tempfile

import transformers

import datasets
from utils import generate_example_dataset, get_duration


_lowerCamelCase =5_0_0_0_0_0
_lowerCamelCase , _lowerCamelCase =os.path.split(__file__)
_lowerCamelCase =os.path.join(RESULTS_BASEPATH, """results""", RESULTS_FILENAME.replace(""".py""", """.json"""))


@get_duration
def _a ( lowerCamelCase, **lowerCamelCase ):
    # Timed wrapper around Dataset.map (duration captured by @get_duration).
    lowerCamelCase : Optional[Any] = dataset.map(**lowerCamelCase )


@get_duration
def _a ( lowerCamelCase, **lowerCamelCase ):
    # Timed wrapper around Dataset.filter.
    lowerCamelCase : Optional[Any] = dataset.filter(**lowerCamelCase )


def _a ( ):
    # Build a synthetic dataset, then time map/filter under several
    # output formats (python, numpy, pandas, torch, tensorflow) and with a
    # real tokenizer function; results are dumped to RESULTS_FILE_PATH.
    lowerCamelCase : Optional[Any] = {"""num examples""": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        lowerCamelCase : Any = datasets.Features({"""text""": datasets.Value("""string""" ), """numbers""": datasets.Value("""float32""" )} )
        lowerCamelCase : Tuple = generate_example_dataset(
            os.path.join(lowerCamelCase, """dataset.arrow""" ), lowerCamelCase, num_examples=lowerCamelCase )
        lowerCamelCase : Tuple = transformers.AutoTokenizer.from_pretrained("""bert-base-cased""", use_fast=lowerCamelCase )

        def tokenize(lowerCamelCase ):
            # Tokenize the "text" column of a batch of examples.
            return tokenizer(examples["""text"""] )

        lowerCamelCase : List[str] = map(lowerCamelCase )
        lowerCamelCase : int = map(lowerCamelCase, batched=lowerCamelCase )
        lowerCamelCase : int = map(lowerCamelCase, function=lambda lowerCamelCase : None, batched=lowerCamelCase )
        with dataset.formatted_as(type="""numpy""" ):
            lowerCamelCase : Optional[int] = map(lowerCamelCase, function=lambda lowerCamelCase : None, batched=lowerCamelCase )
        with dataset.formatted_as(type="""pandas""" ):
            lowerCamelCase : List[str] = map(lowerCamelCase, function=lambda lowerCamelCase : None, batched=lowerCamelCase )
        with dataset.formatted_as(type="""torch""", columns="""numbers""" ):
            lowerCamelCase : List[str] = map(lowerCamelCase, function=lambda lowerCamelCase : None, batched=lowerCamelCase )
        with dataset.formatted_as(type="""tensorflow""", columns="""numbers""" ):
            lowerCamelCase : Optional[int] = map(lowerCamelCase, function=lambda lowerCamelCase : None, batched=lowerCamelCase )
        lowerCamelCase : int = map(lowerCamelCase, function=lowerCamelCase, batched=lowerCamelCase )
        lowerCamelCase : Union[str, Any] = filter(lowerCamelCase )
        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
        with open(lowerCamelCase, """wb""" ) as f:
            f.write(json.dumps(lowerCamelCase ).encode("""utf-8""" ) )


if __name__ == "__main__":  # useful to run the profiler
    benchmark_map_filter()
287
0
'''simple docstring''' import warnings from ...utils import logging from .image_processing_owlvit import OwlViTImageProcessor UpperCamelCase__: Optional[Any] = logging.get_logger(__name__) class SCREAMING_SNAKE_CASE( __SCREAMING_SNAKE_CASE ): """simple docstring""" def __init__( self : List[str] , *__snake_case : Optional[int] , **__snake_case : Any ) -> List[str]: warnings.warn( '''The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please''' ''' use OwlViTImageProcessor instead.''' , __snake_case , ) super().__init__(*__snake_case , **__snake_case )
23
def _a ( lowerCamelCase ): if p < 2: raise ValueError("""p should not be less than 2!""" ) elif p == 2: return True lowerCamelCase : Any = 4 lowerCamelCase : List[str] = (1 << p) - 1 for _ in range(p - 2 ): lowerCamelCase : List[Any] = ((s * s) - 2) % m return s == 0 if __name__ == "__main__": print(lucas_lehmer_test(7)) print(lucas_lehmer_test(1_1))
287
0
# Tokenization tests for CamemBERT (SentencePiece slow + Rust fast tokenizer).
#
# NOTE(review): this file appears machine-mangled.  Local results are all
# assigned to `a_` while later statements read never-bound names
# (`tokenizer`, `vocab_keys`, `rust_tokenizer`, ...), constructor/method
# arguments were replaced by the undefined `_lowercase` (the SAMPLE_VOCAB /
# SAMPLE_BPE_VOCAB module constants were themselves renamed `__snake_case`),
# and every method is named `UpperCamelCase__`, so later defs shadow earlier
# ones.  Restore identifiers from the upstream test file before relying on
# these tests — TODO confirm against
# tests/models/camembert/test_tokenization_camembert.py.
import unittest

from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available

from ...test_tokenization_common import TokenizerTesterMixin


__snake_case : Any = get_tests_dir("""fixtures/test_sentencepiece.model""")
__snake_case : List[Any] = get_tests_dir("""fixtures/test_sentencepiece_bpe.model""")
__snake_case : List[Any] = """pt""" if is_torch_available() else """tf"""


@require_sentencepiece
@require_tokenizers
class A__(__SCREAMING_SNAKE_CASE, unittest.TestCase ):
    """Camembert tokenizer test-suite (slow vs. fast parity + integration)."""

    _A : str = CamembertTokenizer
    _A : str = CamembertTokenizerFast
    _A : Tuple = True
    _A : Dict = True

    def UpperCamelCase__ ( self ) -> int:
        super().setUp()
        # We have a SentencePiece fixture for testing
        a_ : Optional[Any] = CamembertTokenizer(_lowercase )
        tokenizer.save_pretrained(self.tmpdirname )

    def UpperCamelCase__ ( self ) -> Optional[Any]:
        # <pad> maps to id 1 and back.
        a_ : Any = """<pad>"""
        a_ : Union[str, Any] = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowercase ) , _lowercase )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowercase ) , _lowercase )

    def UpperCamelCase__ ( self ) -> List[Any]:
        # First/last vocab entries and total size of the fixture vocab.
        a_ : List[str] = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , """<s>NOTUSED""" )
        self.assertEqual(vocab_keys[1] , """<pad>""" )
        self.assertEqual(vocab_keys[-1] , """<mask>""" )
        self.assertEqual(len(_lowercase ) , 1_004 )

    def UpperCamelCase__ ( self ) -> str:
        self.assertEqual(self.get_tokenizer().vocab_size , 1_005 )

    def UpperCamelCase__ ( self ) -> Optional[int]:
        # Slow (BPE fixture) vs. fast tokenizer must agree on ids.
        a_ : List[str] = CamembertTokenizer(_lowercase )
        tokenizer.save_pretrained(self.tmpdirname )
        a_ : Dict = CamembertTokenizerFast.from_pretrained(self.tmpdirname )
        a_ : List[Any] = """I was born in 92000, and this is falsé."""
        a_ : List[str] = tokenizer.encode(_lowercase )
        a_ : List[Any] = rust_tokenizer.encode(_lowercase )
        self.assertListEqual(_lowercase , _lowercase )
        a_ : Dict = tokenizer.encode(_lowercase , add_special_tokens=_lowercase )
        a_ : str = rust_tokenizer.encode(_lowercase , add_special_tokens=_lowercase )
        self.assertListEqual(_lowercase , _lowercase )
        # <unk> tokens are not the same for `rust` than for `slow`.
        # Because spm gives back raw token instead of `unk` in EncodeAsPieces
        # tokens = tokenizer.tokenize(sequence)
        a_ : Any = tokenizer.convert_ids_to_tokens(_lowercase )
        a_ : str = rust_tokenizer.tokenize(_lowercase )
        self.assertListEqual(_lowercase , _lowercase )

    def UpperCamelCase__ ( self ) -> Optional[Any]:
        # Slow vs. fast parity on tokenize() and encode() for the default fixture.
        if not self.test_rust_tokenizer:
            return
        a_ : str = self.get_tokenizer()
        a_ : List[str] = self.get_rust_tokenizer()
        a_ : str = """I was born in 92000, and this is falsé."""
        a_ : List[str] = tokenizer.tokenize(_lowercase )
        a_ : Optional[Any] = rust_tokenizer.tokenize(_lowercase )
        self.assertListEqual(_lowercase , _lowercase )
        a_ : int = tokenizer.encode(_lowercase , add_special_tokens=_lowercase )
        a_ : List[Any] = rust_tokenizer.encode(_lowercase , add_special_tokens=_lowercase )
        self.assertListEqual(_lowercase , _lowercase )
        a_ : Tuple = self.get_rust_tokenizer()
        a_ : List[Any] = tokenizer.encode(_lowercase )
        a_ : Tuple = rust_tokenizer.encode(_lowercase )
        self.assertListEqual(_lowercase , _lowercase )

    @slow
    def UpperCamelCase__ ( self ) -> Optional[int]:
        # Integration check against the published camembert-base checkpoint.
        # fmt: off
        a_ : Dict = {"""input_ids""": [[5, 54, 7_196, 297, 30, 23, 776, 18, 11, 3_215, 3_705, 8_252, 22, 3_164, 1_181, 2_116, 29, 16, 813, 25, 791, 3_314, 20, 3_446, 38, 27_575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9_088, 20, 1_517, 8, 22_804, 18_818, 10, 38, 629, 607, 607, 142, 19, 7_196, 867, 56, 10_326, 24, 2_267, 20, 416, 5_072, 15_612, 233, 734, 7, 2_399, 27, 16, 3_015, 1_649, 7, 24, 20, 4_338, 2_399, 27, 13, 3_400, 14, 13, 6_189, 8, 930, 9, 6]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on
        # camembert is a french model. So we also use french texts.
        a_ : Union[str, Any] = [
            """Le transformeur est un modèle d'apprentissage profond introduit en 2017, """
            """utilisé principalement dans le domaine du traitement automatique des langues (TAL).""",
            """À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus """
            """pour gérer des données séquentielles, telles que le langage naturel, pour des tâches """
            """telles que la traduction et la synthèse de texte.""",
        ]
        self.tokenizer_integration_test_util(
            expected_encoding=_lowercase , model_name="""camembert-base""" , revision="""3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf""" , sequences=_lowercase , )
248
import warnings from ...utils import logging from .image_processing_owlvit import OwlViTImageProcessor _lowerCamelCase =logging.get_logger(__name__) class A__ ( __SCREAMING_SNAKE_CASE): def __init__( self , *__magic_name__ , **__magic_name__ ): warnings.warn( """The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please""" """ use OwlViTImageProcessor instead.""" , __magic_name__ , ) super().__init__(*__magic_name__ , **__magic_name__ )
287
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available snake_case_ = { """configuration_pix2struct""": [ """PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Pix2StructConfig""", """Pix2StructTextConfig""", """Pix2StructVisionConfig""", ], """processing_pix2struct""": ["""Pix2StructProcessor"""], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case_ = ["""Pix2StructImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case_ = [ """PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST""", """Pix2StructPreTrainedModel""", """Pix2StructForConditionalGeneration""", """Pix2StructVisionModel""", """Pix2StructTextModel""", ] if TYPE_CHECKING: from .configuration_pixastruct import ( PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP, PixaStructConfig, PixaStructTextConfig, PixaStructVisionConfig, ) from .processing_pixastruct import PixaStructProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_pixastruct import PixaStructImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_pixastruct import ( PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST, PixaStructForConditionalGeneration, PixaStructPreTrainedModel, PixaStructTextModel, PixaStructVisionModel, ) else: import sys snake_case_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
78
# SegFormer model configuration and its ONNX export configuration.
import warnings
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


_lowerCamelCase =logging.get_logger(__name__)

_lowerCamelCase ={
    """nvidia/segformer-b0-finetuned-ade-512-512""": (
        """https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"""
    ),
    # See all SegFormer models at https://huggingface.co/models?filter=segformer
}


class A__ ( __SCREAMING_SNAKE_CASE):
    """Configuration class for SegFormer models.

    Fix: the original ``__init__`` declared *every* parameter under the single
    name ``__magic_name__`` — a duplicate-argument SyntaxError — while the body
    read the canonical SegFormer parameter names and assigned the results to a
    throwaway local.  The signature below restores the canonical names with
    the original defaults, in the original order, and stores each value on
    ``self`` as the body's reads require.
    """

    _UpperCAmelCase : Dict = """segformer"""

    def __init__(
        self ,
        num_channels=3 ,                       # input image channels
        num_encoder_blocks=4 ,                 # number of hierarchical stages
        depths=[2, 2, 2, 2] ,                  # transformer layers per stage
        sr_ratios=[8, 4, 2, 1] ,               # attention spatial-reduction ratios
        hidden_sizes=[3_2, 6_4, 1_6_0, 2_5_6] ,
        patch_sizes=[7, 3, 3, 3] ,
        strides=[4, 2, 2, 2] ,
        num_attention_heads=[1, 2, 5, 8] ,
        mlp_ratios=[4, 4, 4, 4] ,
        hidden_act="gelu" ,
        hidden_dropout_prob=0.0 ,
        attention_probs_dropout_prob=0.0 ,
        classifier_dropout_prob=0.1 ,
        initializer_range=0.02 ,
        drop_path_rate=0.1 ,
        layer_norm_eps=1e-6 ,
        decoder_hidden_size=2_5_6 ,
        semantic_loss_ignore_index=2_5_5 ,     # label id ignored by the seg. loss
        **kwargs ,
    ):
        super().__init__(**kwargs )

        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                """Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"""
                """ removed, as the behaviour will default to that of reshape_last_stage = True.""" , FutureWarning , )

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        # legacy flag; defaults to True when not supplied
        self.reshape_last_stage = kwargs.get("""reshape_last_stage""" , True )
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class A__ ( __SCREAMING_SNAKE_CASE):
    """ONNX export configuration for SegFormer.

    NOTE(review): this class shares the name ``A__`` with the config class
    above and therefore shadows it at module level — the two should carry
    distinct names as in the upstream file.
    """

    _UpperCAmelCase : str = version.parse("""1.11""")

    @property
    def UpperCamelCase__ ( self ):
        # ONNX input spec: NCHW pixel values with fully dynamic axes.
        return OrderedDict(
            [
                ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
            ] )

    @property
    def UpperCamelCase__ ( self ):
        # Absolute tolerance used when validating exported-model outputs.
        return 1e-4

    @property
    def UpperCamelCase__ ( self ):
        # Default ONNX opset version.
        return 1_2
287
0
import multiprocessing import time from arguments import PretokenizationArguments from datasets import load_dataset from transformers import AutoTokenizer, HfArgumentParser def lowerCamelCase__ ( __lowerCAmelCase : Optional[Any] ): """simple docstring""" lowerCAmelCase_ = {} lowerCAmelCase_ = tokenizer(example["content"] , truncation=__lowerCAmelCase )["""input_ids"""] lowerCAmelCase_ = len(example["content"] ) / len(output["input_ids"] ) return output _A = HfArgumentParser(PretokenizationArguments) _A = parser.parse_args() if args.num_workers is None: _A = multiprocessing.cpu_count() _A = AutoTokenizer.from_pretrained(args.tokenizer_dir) _A = time.time() _A = load_dataset(args.dataset_name, split="train") print(f"""Dataset loaded in {time.time()-t_start:.2f}s""") _A = time.time() _A = ds.map( tokenize, num_proc=args.num_workers, remove_columns=[ "repo_name", "path", "copies", "size", "content", "license", "hash", "line_mean", "line_max", "alpha_frac", "autogenerated", ], ) print(f"""Dataset tokenized in {time.time()-t_start:.2f}s""") _A = time.time() ds.push_to_hub(args.tokenized_data_repo) print(f"""Data pushed to the hub in {time.time()-t_start:.2f}s""")
231
# GPT-Neo model configuration and its ONNX export configuration.
#
# NOTE(review): this file appears machine-mangled.  The `__init__` below
# declares every parameter under the single name `__magic_name__` (a
# duplicate-argument SyntaxError) while the body reads the canonical GPT-Neo
# parameter names; local results throughout are assigned to `lowerCamelCase`
# while later statements read never-bound names; both module-level helper
# functions are named `_a`, so the second shadows the first; and the two
# class attributes `_UpperCAmelCase` overwrite each other.  Reconstruct from
# the upstream configuration_gpt_neo.py — TODO confirm.
from collections import OrderedDict
from typing import Any, Mapping, Optional

from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging


_lowerCamelCase =logging.get_logger(__name__)

_lowerCamelCase ={
    """EleutherAI/gpt-neo-1.3B""": """https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json""",
    # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}


class A__ ( __SCREAMING_SNAKE_CASE):
    """Configuration class for GPT-Neo models."""

    _UpperCAmelCase : List[Any] = """gpt_neo"""
    _UpperCAmelCase : Union[str, Any] = ["""past_key_values"""]
    _UpperCAmelCase : List[Any] = {"""num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers"""}

    def __init__( self , __magic_name__=5_0_2_5_7 , __magic_name__=2_0_4_8 , __magic_name__=2_0_4_8 , __magic_name__=2_4 , __magic_name__=[[["global", "local"], 1_2]] , __magic_name__=1_6 , __magic_name__=None , __magic_name__=2_5_6 , __magic_name__="gelu_new" , __magic_name__=0.0 , __magic_name__=0.0 , __magic_name__=0.0 , __magic_name__=0.1 , __magic_name__=1e-5 , __magic_name__=0.02 , __magic_name__=True , __magic_name__=5_0_2_5_6 , __magic_name__=5_0_2_5_6 , **__magic_name__ , ):
        # Store all hyper-parameters, expand the per-layer attention pattern,
        # and validate that it matches the declared number of layers.
        lowerCamelCase : List[Any] = vocab_size
        lowerCamelCase : str = max_position_embeddings
        lowerCamelCase : str = hidden_size
        lowerCamelCase : Optional[int] = num_layers
        lowerCamelCase : str = num_heads
        lowerCamelCase : Optional[Any] = intermediate_size
        lowerCamelCase : List[Any] = window_size
        lowerCamelCase : int = activation_function
        lowerCamelCase : Union[str, Any] = resid_dropout
        lowerCamelCase : List[Any] = embed_dropout
        lowerCamelCase : List[str] = attention_dropout
        lowerCamelCase : Dict = classifier_dropout
        lowerCamelCase : Any = layer_norm_epsilon
        lowerCamelCase : Dict = initializer_range
        lowerCamelCase : Dict = use_cache
        lowerCamelCase : Optional[Any] = bos_token_id
        lowerCamelCase : int = eos_token_id
        lowerCamelCase : List[Any] = attention_types
        lowerCamelCase : Optional[Any] = self.expand_attention_types_params(__magic_name__ )
        if len(self.attention_layers ) != self.num_layers:
            raise ValueError(
                """Configuration for convolutional module is incorrect. """
                """It is required that `len(config.attention_layers)` == `config.num_layers` """
                F'''but is `len(config.attention_layers) = {len(self.attention_layers )}`, '''
                F'''`config.num_layers = {self.num_layers}`. '''
                """`config.attention_layers` is prepared using `config.attention_types`. """
                """Please verify the value of `config.attention_types` argument.""" )
        super().__init__(bos_token_id=__magic_name__ , eos_token_id=__magic_name__ , **__magic_name__ )

    @staticmethod
    def UpperCamelCase__ ( __magic_name__ ):
        # Expand [["global","local"], 12] style specs into a flat per-layer
        # list, e.g. 12 alternating "global"/"local" entries.
        lowerCamelCase : Optional[int] = []
        for item in attention_types:
            for _ in range(item[1] ):
                attentions.extend(item[0] )
        return attentions


def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase ):
    # Custom torch.Tensor.unfold replacement used for ONNX export
    # (signature garbled: the four parameters were presumably
    # input / dimension / size / step — TODO confirm).
    import torch

    lowerCamelCase : Any = input.size()
    lowerCamelCase : List[Any] = len(lowerCamelCase )
    lowerCamelCase : Optional[Any] = shape[dimension]
    lowerCamelCase : Optional[int] = torch.arange(0, lowerCamelCase, lowerCamelCase )
    lowerCamelCase : Dict = torch.div(sizedim - size, lowerCamelCase, rounding_mode="""floor""" ) + 1
    lowerCamelCase : int = torch.arange(lowerCamelCase ) + low_indices[:min_length][:, None]
    lowerCamelCase : str = [slice(lowerCamelCase )] * rank
    lowerCamelCase : List[str] = indices
    lowerCamelCase : Dict = input[s]
    lowerCamelCase : Any = list(range(0, rank + 1 ) )
    perm.append(perm.pop(dimension + 1 ) )
    return sliced.permute(lowerCamelCase )


def _a ( lowerCamelCase, lowerCamelCase ):
    # Return the largest divisor of `dim` that is < `full_attention_block_length`
    # plus the corresponding block count (names garbled — TODO confirm).
    import torch

    lowerCamelCase : List[Any] = torch.arange(1, lowerCamelCase )
    lowerCamelCase : Optional[int] = torch.remainder(lowerCamelCase, lowerCamelCase )
    lowerCamelCase : List[Any] = remainders == 0
    lowerCamelCase : List[Any] = candidates[divisor_indices]
    lowerCamelCase : Optional[Any] = torch.max(lowerCamelCase )
    return largest_divisor, torch.div(lowerCamelCase, lowerCamelCase, rounding_mode="""floor""" )


class A__ ( __SCREAMING_SNAKE_CASE):
    """ONNX export configuration (with past key/values) for GPT-Neo.

    NOTE(review): shares the name ``A__`` with the config class above and
    therefore shadows it at module level.
    """

    @property
    def UpperCamelCase__ ( self ):
        # ONNX input spec; sequence axis grows by the cached past length
        # when use_past is enabled.
        lowerCamelCase : str = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} )
        if self.use_past:
            self.fill_with_past_key_values_(__magic_name__ , direction="""inputs""" )
            lowerCamelCase : int = {0: """batch""", 1: """past_sequence + sequence"""}
        else:
            lowerCamelCase : Tuple = {0: """batch""", 1: """sequence"""}
        return common_inputs

    @property
    def UpperCamelCase__ ( self ):
        return self._config.num_heads

    def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ = -1 , __magic_name__ = -1 , __magic_name__ = False , __magic_name__ = None , ):
        # Build dummy ONNX inputs, adding zero-filled past_key_values and an
        # extended attention mask when exporting with use_past.
        lowerCamelCase : Optional[int] = super(__magic_name__ , self ).generate_dummy_inputs(
            __magic_name__ , batch_size=__magic_name__ , seq_length=__magic_name__ , is_pair=__magic_name__ , framework=__magic_name__ )
        # We need to order the input in the way they appears in the forward()
        lowerCamelCase : int = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} )
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
            else:
                import torch

                lowerCamelCase , lowerCamelCase : Optional[Any] = common_inputs["""input_ids"""].shape
                # Not using the same length for past_key_values
                lowerCamelCase : Optional[int] = seqlen + 2
                lowerCamelCase : List[Any] = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                lowerCamelCase : str = [
                    (torch.zeros(__magic_name__ ), torch.zeros(__magic_name__ )) for _ in range(self.num_layers )
                ]
        lowerCamelCase : Tuple = common_inputs["""attention_mask"""]
        if self.use_past:
            lowerCamelCase : str = ordered_inputs["""attention_mask"""].dtype
            lowerCamelCase : Any = torch.cat(
                [ordered_inputs["""attention_mask"""], torch.ones(__magic_name__ , __magic_name__ , dtype=__magic_name__ )] , dim=1 )
        return ordered_inputs

    @property
    def UpperCamelCase__ ( self ):
        # Default ONNX opset version.
        return 1_3
287
0
'''simple docstring''' import qiskit def __UpperCamelCase ( UpperCAmelCase = 2 ): lowercase__ : List[Any] = qubits # Using Aer's simulator lowercase__ : int = qiskit.Aer.get_backend('''aer_simulator''' ) # Creating a Quantum Circuit acting on the q register lowercase__ : str = qiskit.QuantumCircuit(UpperCAmelCase , UpperCAmelCase ) # Adding a H gate on qubit 0 (now q0 in superposition) circuit.h(0 ) for i in range(1 , UpperCAmelCase ): # Adding CX (CNOT) gate circuit.cx(i - 1 , UpperCAmelCase ) # Mapping the quantum measurement to the classical bits circuit.measure(list(range(UpperCAmelCase ) ) , list(range(UpperCAmelCase ) ) ) # Now measuring any one qubit would affect other qubits to collapse # their super position and have same state as the measured one. # Executing the circuit on the simulator lowercase__ : List[str] = qiskit.execute(UpperCAmelCase , UpperCAmelCase , shots=1000 ) return job.result().get_counts(UpperCAmelCase ) if __name__ == "__main__": print(F'Total count for various states are: {quantum_entanglement(3)}')
198
from . import __version__ # Backward compatibility imports, to make sure all those objects can be found in file_utils from .utils import ( CLOUDFRONT_DISTRIB_PREFIX, CONFIG_NAME, DISABLE_TELEMETRY, DUMMY_INPUTS, DUMMY_MASK, ENV_VARS_TRUE_AND_AUTO_VALUES, ENV_VARS_TRUE_VALUES, FEATURE_EXTRACTOR_NAME, FLAX_WEIGHTS_NAME, HF_MODULES_CACHE, HUGGINGFACE_CO_PREFIX, HUGGINGFACE_CO_RESOLVE_ENDPOINT, MODEL_CARD_NAME, MULTIPLE_CHOICE_DUMMY_INPUTS, PYTORCH_PRETRAINED_BERT_CACHE, PYTORCH_TRANSFORMERS_CACHE, S3_BUCKET_PREFIX, SENTENCEPIECE_UNDERLINE, SPIECE_UNDERLINE, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME, TORCH_FX_REQUIRED_VERSION, TRANSFORMERS_CACHE, TRANSFORMERS_DYNAMIC_MODULE_NAME, USE_JAX, USE_TF, USE_TORCH, WEIGHTS_INDEX_NAME, WEIGHTS_NAME, ContextManagers, DummyObject, EntryNotFoundError, ExplicitEnum, ModelOutput, PaddingStrategy, PushToHubMixin, RepositoryNotFoundError, RevisionNotFoundError, TensorType, _LazyModule, add_code_sample_docstrings, add_end_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, cached_property, copy_func, default_cache_path, define_sagemaker_information, get_cached_models, get_file_from_repo, get_full_repo_name, get_torch_version, has_file, http_user_agent, is_apex_available, is_bsa_available, is_coloredlogs_available, is_datasets_available, is_detectrona_available, is_faiss_available, is_flax_available, is_ftfy_available, is_in_notebook, is_ipex_available, is_librosa_available, is_offline_mode, is_onnx_available, is_pandas_available, is_phonemizer_available, is_protobuf_available, is_psutil_available, is_pyanvml_available, is_pyctcdecode_available, is_pytesseract_available, is_pytorch_quantization_available, is_rjieba_available, is_sagemaker_dp_enabled, is_sagemaker_mp_enabled, is_scipy_available, is_sentencepiece_available, is_seqio_available, is_sklearn_available, is_soundfile_availble, is_spacy_available, is_speech_available, is_tensor, is_tensorflow_probability_available, is_tfaonnx_available, is_tf_available, 
is_timm_available, is_tokenizers_available, is_torch_available, is_torch_bfaa_available, is_torch_cuda_available, is_torch_fx_available, is_torch_fx_proxy, is_torch_mps_available, is_torch_tfaa_available, is_torch_tpu_available, is_torchaudio_available, is_training_run_on_sagemaker, is_vision_available, replace_return_docstrings, requires_backends, to_numpy, to_py_obj, torch_only_method, )
287
0
"""simple docstring""" import unittest from transformers import RoFormerTokenizer, RoFormerTokenizerFast from transformers.testing_utils import require_rjieba, require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_rjieba @require_tokenizers class _A ( __SCREAMING_SNAKE_CASE ,unittest.TestCase ): """simple docstring""" UpperCAmelCase : Union[str, Any] = RoFormerTokenizer UpperCAmelCase : Optional[int] = RoFormerTokenizerFast UpperCAmelCase : Dict = True UpperCAmelCase : List[str] = True def __snake_case ( self : List[str]): super().setUp() def __snake_case ( self : str , **__UpperCAmelCase : Tuple): return self.tokenizer_class.from_pretrained("junnyu/roformer_chinese_base" , **__UpperCAmelCase) def __snake_case ( self : Union[str, Any] , **__UpperCAmelCase : Optional[Any]): return self.rust_tokenizer_class.from_pretrained("junnyu/roformer_chinese_base" , **__UpperCAmelCase) def __snake_case ( self : List[str]): a : Optional[int] = """永和服装饰品有限公司,今天天气非常好""" a : str = """永和 服装 饰品 有限公司 , 今 天 天 气 非常 好""" return input_text, output_text def __snake_case ( self : Any): a : int = self.get_tokenizer() a : Union[str, Any] = self.get_chinese_input_output_texts() a : int = tokenizer.tokenize(__UpperCAmelCase) self.assertListEqual(__UpperCAmelCase , output_text.split()) a : Union[str, Any] = tokens + [tokenizer.unk_token] a : List[Any] = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100] self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCAmelCase) , __UpperCAmelCase) def __snake_case ( self : Any): a : List[str] = self.get_rust_tokenizer() a : List[str] = self.get_chinese_input_output_texts() a : int = tokenizer.tokenize(__UpperCAmelCase) self.assertListEqual(__UpperCAmelCase , output_text.split()) a : Union[str, Any] = tokens + [tokenizer.unk_token] a : Union[str, Any] = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100] 
self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCAmelCase) , __UpperCAmelCase) def __snake_case ( self : Any): pass def __snake_case ( self : str): pass def __snake_case ( self : Tuple): pass
40
"""Integration checks for gradient synchronisation under `accelerate`:
`no_sync`, `accumulate`, and dataloader end-of-iteration tracking."""

from copy import deepcopy

import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader

from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed


def check_model_parameters(model_a, model_b, did_step, iteration):
    """Assert the two models' gradients are in sync iff an optimizer step was taken."""
    for param, grad_param in zip(model_a.parameters(), model_b.parameters()):
        if not param.requires_grad:
            continue
        if not did_step:
            # Grads should not be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is False
            ), f"Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"
        else:
            # Grads should be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is True
            ), f"Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"


def step_model(model, input, target, accelerator, do_backward=True):
    """Run one forward/backward; manual backward divides the loss like `accumulate` would."""
    model.train()
    output = model(input)
    loss = F.mse_loss(output, target.to(output.device))
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss)


def get_training_setup(accelerator, sched=False):
    """Build a reference model plus an accelerator-prepared copy (optionally with opt/sched)."""
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=80)
    dataloader = DataLoader(dset, batch_size=16)
    model.to(accelerator.device)
    if sched:
        opt = AdamW(params=model.parameters(), lr=1e-3)
        ddp_opt = AdamW(params=ddp_model.parameters(), lr=1e-3)
        scheduler = LambdaLR(opt, lr_lambda=lambda epoch: epoch**0.65)
        ddp_sched = LambdaLR(ddp_opt, lr_lambda=lambda epoch: epoch**0.65)
    # Make a copy of `model`
    if sched:
        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model, ddp_opt, ddp_sched, dataloader)
    else:
        ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    if sched:
        return (model, opt, scheduler, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader


def test_noop_sync(accelerator):
    # Test when on a single CPU or GPU that the context manager does nothing
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model, ddp_model, True, iteration)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad, ddp_param.grad
            ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]


def test_distributed_sync(accelerator):
    # Test on distributed setup that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]


def test_gradient_accumulation(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator, False)
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
    GradientState._reset_state()


def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model, input, target, accelerator, False)
        opt.step()
        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)):
            if split_batches:
                sched.step()
            else:
                # Mirror what the prepared scheduler does: one step per process.
                for _ in range(accelerator.num_processes):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()

        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), f'Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n'
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader))
        if accelerator.num_processes > 1:
            check_model_parameters(model, ddp_model, did_step, iteration)
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
    GradientState._reset_state()


def test_dataloader_break():
    """Check `gradient_state.active_dataloader` tracking across nested dataloaders."""
    accelerator = Accelerator()
    first_dset = RegressionDataset(length=80)
    first_dataloader = DataLoader(first_dset, batch_size=16)
    second_dset = RegressionDataset(length=96)
    second_dataloader = DataLoader(second_dset, batch_size=16)
    first_dataloader, second_dataloader = accelerator.prepare(first_dataloader, second_dataloader)
    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader):
        assert id(accelerator.gradient_state.active_dataloader) == id(first_dataloader)
        if iteration < len(first_dataloader) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                for batch_num, _ in enumerate(second_dataloader):
                    assert id(accelerator.gradient_state.active_dataloader) == id(second_dataloader)
                    if batch_num < len(second_dataloader) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None


def main():
    accelerator = Accelerator()
    state = accelerator.state
    if state.local_process_index == 0:
        print("**Test `accumulate` gradient accumulation with dataloader break**")
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print("**Test NOOP `no_sync` context manager**")
        test_noop_sync(accelerator)
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print("**Test Distributed `no_sync` context manager**")
        test_distributed_sync(accelerator)
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation(split_batch, dispatch_batches)
    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version("<", "2.0") or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                "`split_batches=False`, `dispatch_batches=False`**",
            )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
287
0
"""simple docstring""" from __future__ import annotations __snake_case = [True] * 1000001 __snake_case = 2 while i * i <= 1000000: if seive[i]: for j in range(i * i, 1000001, i): __snake_case = False i += 1 def A_ ( _lowerCAmelCase : int ): """simple docstring""" return seive[n] def A_ ( _lowerCAmelCase : Optional[Any] ): """simple docstring""" return any(digit in '''02468''' for digit in str(_lowerCAmelCase ) ) def A_ ( _lowerCAmelCase : Union[str, Any] = 1_00_00_00 ): """simple docstring""" _a = [2] # result already includes the number 2. for num in range(3, limit + 1, 2 ): if is_prime(_lowerCAmelCase ) and not contains_an_even_digit(_lowerCAmelCase ): _a = str(_lowerCAmelCase ) _a = [int(str_num[j:] + str_num[:j] ) for j in range(len(_lowerCAmelCase ) )] if all(is_prime(_lowerCAmelCase ) for i in list_nums ): result.append(_lowerCAmelCase ) return result def A_ ( ): """simple docstring""" return len(find_circular_primes() ) if __name__ == "__main__": print(f'{len(find_circular_primes()) = }')
320
"""`datasets` metric wrapping `scipy.stats.pearsonr`."""

from scipy.stats import pearsonr

import datasets


_DESCRIPTION = """
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
"""


_KWARGS_DESCRIPTION = """
Args:
    predictions (`list` of `int`): Predicted class labels, as returned by a model.
    references (`list` of `int`): Ground truth labels.
    return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.

Returns:
    pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
    p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.

Examples:

    Example 1-A simple example using only predictions and references.
        >>> pearsonr_metric = datasets.load_metric("pearsonr")
        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
        >>> print(round(results['pearsonr'], 2))
        -0.74

    Example 2-The same as Example 1, but that also returns the `p-value`.
        >>> pearsonr_metric = datasets.load_metric("pearsonr")
        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
        >>> print(sorted(list(results.keys())))
        ['p-value', 'pearsonr']
        >>> print(round(results['pearsonr'], 2))
        -0.74
        >>> print(round(results['p-value'], 2))
        0.15
"""


_CITATION = """
@article{2020SciPy-NMeth,
author  = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
          Haberland, Matt and Reddy, Tyler and Cournapeau, David and
          Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
          Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
          Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
          Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
          Kern, Robert and Larson, Eric and Carey, C J and
          Polat, Ilhan and Feng, Yu and Moore, Eric W. and
          {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
          Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
          Harris, Charles R. and Archibald, Anne M. and
          Ribeiro, Antonio H. and Pedregosa, Fabian and
          {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title   = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
          Computing in Python}},
journal = {Nature Methods},
year    = {2020},
volume  = {17},
pages   = {261--272},
adsurl  = {https://rdcu.be/b08Wh},
doi     = {10.1038/s41592-019-0686-2},
}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Pearsonr(datasets.Metric):
    """Pearson correlation metric; see `scipy.stats.pearsonr` for the underlying math."""

    def _info(self):
        # Hook required by `datasets.Metric`: declares inputs and documentation.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        # Hook required by `datasets.Metric`; Pearson's r is symmetric in its arguments.
        if return_pvalue:
            results = pearsonr(references, predictions)
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references, predictions)[0])}
287
0
"""Text-to-image pipeline for VQ-Diffusion (latent-index diffusion over a VQ-VAE codebook)."""

from typing import Callable, List, Optional, Tuple, Union

import torch
from transformers import CLIPTextModel, CLIPTokenizer

from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, TransformeraDModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class LearnedClassifierFreeSamplingEmbeddings(ModelMixin, ConfigMixin):
    """Stores the (optionally learned) unconditional embeddings used for classifier-free guidance."""

    @register_to_config
    def __init__(self, learnable: bool, hidden_size: Optional[int] = None, length: Optional[int] = None):
        super().__init__()

        self.learnable = learnable

        if self.learnable:
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"
            embeddings = torch.zeros(length, hidden_size)
        else:
            # Parameter(None) yields an empty, non-learned parameter.
            embeddings = None

        self.embeddings = torch.nn.Parameter(embeddings)


class VQDiffusionPipeline(DiffusionPipeline):
    """
    Pipeline for text-to-image generation using VQ Diffusion.

    Components:
        vqvae: VQ-VAE used to decode latent codebook indices into images.
        text_encoder / tokenizer: frozen CLIP text tower producing prompt embeddings.
        transformer: denoising transformer over latent pixel indices.
        scheduler: VQDiffusionScheduler driving the reverse process.
        learned_classifier_free_sampling_embeddings: unconditional embeddings for guidance.
    """

    vqvae: VQModel
    text_encoder: CLIPTextModel
    tokenizer: CLIPTokenizer
    transformer: TransformeraDModel
    learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings
    scheduler: VQDiffusionScheduler

    def __init__(
        self,
        vqvae: VQModel,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        transformer: TransformeraDModel,
        scheduler: VQDiffusionScheduler,
        learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings,
    ):
        super().__init__()

        self.register_modules(
            vqvae=vqvae,
            transformer=transformer,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )

    def _encode_prompt(self, prompt, num_images_per_prompt, do_classifier_free_guidance):
        """Encode `prompt` with CLIP; optionally prepend unconditional embeddings for guidance."""
        batch_size = len(prompt) if isinstance(prompt, list) else 1

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        prompt_embeds = self.text_encoder(text_input_ids.to(self.device))[0]

        # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
        # While CLIP does normalize the pooled output of the text transformer when combining
        # the image and text embeddings, CLIP does not directly normalize the last hidden state.
        #
        # CLIP normalizing the pooled output.
        # https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
        prompt_embeds = prompt_embeds / prompt_embeds.norm(dim=-1, keepdim=True)

        # duplicate text embeddings for each generation per prompt
        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            if self.learned_classifier_free_sampling_embeddings.learnable:
                negative_prompt_embeds = self.learned_classifier_free_sampling_embeddings.embeddings
                negative_prompt_embeds = negative_prompt_embeds.unsqueeze(0).repeat(batch_size, 1, 1)
            else:
                uncond_tokens = [""] * batch_size
                max_length = text_input_ids.shape[-1]
                uncond_input = self.tokenizer(
                    uncond_tokens,
                    padding="max_length",
                    max_length=max_length,
                    truncation=True,
                    return_tensors="pt",
                )
                negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
                # See comment for normalizing text embeddings
                negative_prompt_embeds = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1, keepdim=True)

                # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
                seq_len = negative_prompt_embeds.shape[1]
                negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
                negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])

        return prompt_embeds

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        num_inference_steps: int = 100,
        guidance_scale: float = 5.0,
        truncation_rate: float = 1.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
    ) -> Union[ImagePipelineOutput, Tuple]:
        """Run the full generation loop; returns decoded images (PIL by default)."""
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0

        prompt_embeds = self._encode_prompt(prompt, num_images_per_prompt, do_classifier_free_guidance)

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # get the initial completely masked latents unless the user supplied it
        latents_shape = (batch_size, self.transformer.num_latent_pixels)
        if latents is None:
            # The highest embedding index is the "masked" class.
            mask_class = self.transformer.num_vector_embeds - 1
            latents = torch.full(latents_shape, mask_class).to(self.device)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
                raise ValueError(
                    "Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,"
                    f" {self.transformer.num_vector_embeds - 1} (inclusive)."
                )
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)

        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        sample = latents

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the sample if we are doing classifier free guidance
            latent_model_input = torch.cat([sample] * 2) if do_classifier_free_guidance else sample

            # predict the un-noised image
            # model_output == `log_p_x_0`
            model_output = self.transformer(latent_model_input, encoder_hidden_states=prompt_embeds, timestep=t).sample

            if do_classifier_free_guidance:
                model_output_uncond, model_output_text = model_output.chunk(2)
                model_output = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
                # Re-normalize the guided log-probabilities.
                model_output -= torch.logsumexp(model_output, dim=1, keepdim=True)

            model_output = self.truncate(model_output, truncation_rate)

            # remove `log(0)`'s (`-inf`s)
            model_output = model_output.clamp(-70)

            # compute the previous noisy sample x_t -> x_t-1
            sample = self.scheduler.step(model_output, timestep=t, sample=sample, generator=generator).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, sample)

        embedding_channels = self.vqvae.config.vq_embed_dim
        embeddings_shape = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
        embeddings = self.vqvae.quantize.get_codebook_entry(sample, shape=embeddings_shape)
        image = self.vqvae.decode(embeddings, force_not_quantize=True).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)

    def truncate(self, log_p_x_0: torch.FloatTensor, truncation_rate: float) -> torch.FloatTensor:
        """Zero out (set to log(0)) the lowest-probability classes whose cumulative
        probability exceeds `truncation_rate`, keeping at least the argmax class."""
        sorted_log_p_x_0, indices = torch.sort(log_p_x_0, 1, descending=True)
        sorted_p_x_0 = torch.exp(sorted_log_p_x_0)
        keep_mask = sorted_p_x_0.cumsum(dim=1) < truncation_rate

        # Ensure that at least the largest probability is not zeroed out
        all_true = torch.full_like(keep_mask[:, 0:1, :], True)
        keep_mask = torch.cat((all_true, keep_mask), dim=1)
        keep_mask = keep_mask[:, :-1, :]

        # Undo the sort so the mask lines up with the original class ordering.
        keep_mask = keep_mask.gather(1, indices.argsort(1))

        rv = log_p_x_0.clone()
        rv[~keep_mask] = -torch.inf  # -inf = log(0)

        return rv
35
"""Conditional DETR model configuration."""

import copy
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)

CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/conditional-detr-resnet-50": (
        "https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"
    ),
}


class ConditionalDetrConfig(PretrainedConfig):
    """
    Configuration class storing the architecture and loss hyper-parameters of a
    Conditional DETR model. Instantiating with defaults yields a configuration
    similar to `microsoft/conditional-detr-resnet-50`.
    """

    model_type = "conditional_detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=2,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        cls_loss_coefficient=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        focal_alpha=0.25,
        **kwargs,
    ):
        # The backbone comes either from timm or from a transformers config — never both.
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                # Re-hydrate a serialized backbone config through its registered class.
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        """Serialize to a dict, expanding the nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class ConditionalDetrOnnxConfig(OnnxConfig):
    """ONNX export configuration for Conditional DETR."""

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
287
0
from . import __version__ # Backward compatibility imports, to make sure all those objects can be found in file_utils from .utils import ( CLOUDFRONT_DISTRIB_PREFIX, CONFIG_NAME, DISABLE_TELEMETRY, DUMMY_INPUTS, DUMMY_MASK, ENV_VARS_TRUE_AND_AUTO_VALUES, ENV_VARS_TRUE_VALUES, FEATURE_EXTRACTOR_NAME, FLAX_WEIGHTS_NAME, HF_MODULES_CACHE, HUGGINGFACE_CO_PREFIX, HUGGINGFACE_CO_RESOLVE_ENDPOINT, MODEL_CARD_NAME, MULTIPLE_CHOICE_DUMMY_INPUTS, PYTORCH_PRETRAINED_BERT_CACHE, PYTORCH_TRANSFORMERS_CACHE, S3_BUCKET_PREFIX, SENTENCEPIECE_UNDERLINE, SPIECE_UNDERLINE, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME, TORCH_FX_REQUIRED_VERSION, TRANSFORMERS_CACHE, TRANSFORMERS_DYNAMIC_MODULE_NAME, USE_JAX, USE_TF, USE_TORCH, WEIGHTS_INDEX_NAME, WEIGHTS_NAME, ContextManagers, DummyObject, EntryNotFoundError, ExplicitEnum, ModelOutput, PaddingStrategy, PushToHubMixin, RepositoryNotFoundError, RevisionNotFoundError, TensorType, _LazyModule, add_code_sample_docstrings, add_end_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, cached_property, copy_func, default_cache_path, define_sagemaker_information, get_cached_models, get_file_from_repo, get_full_repo_name, get_torch_version, has_file, http_user_agent, is_apex_available, is_bsa_available, is_coloredlogs_available, is_datasets_available, is_detectrona_available, is_faiss_available, is_flax_available, is_ftfy_available, is_in_notebook, is_ipex_available, is_librosa_available, is_offline_mode, is_onnx_available, is_pandas_available, is_phonemizer_available, is_protobuf_available, is_psutil_available, is_pyanvml_available, is_pyctcdecode_available, is_pytesseract_available, is_pytorch_quantization_available, is_rjieba_available, is_sagemaker_dp_enabled, is_sagemaker_mp_enabled, is_scipy_available, is_sentencepiece_available, is_seqio_available, is_sklearn_available, is_soundfile_availble, is_spacy_available, is_speech_available, is_tensor, is_tensorflow_probability_available, is_tfaonnx_available, is_tf_available, 
is_timm_available, is_tokenizers_available, is_torch_available, is_torch_bfaa_available, is_torch_cuda_available, is_torch_fx_available, is_torch_fx_proxy, is_torch_mps_available, is_torch_tfaa_available, is_torch_tpu_available, is_torchaudio_available, is_training_run_on_sagemaker, is_vision_available, replace_return_docstrings, requires_backends, to_numpy, to_py_obj, torch_only_method, )
296
import json import sys def _a ( lowerCamelCase, lowerCamelCase ): with open(lowerCamelCase, encoding="""utf-8""" ) as f: lowerCamelCase : List[Any] = json.load(lowerCamelCase ) lowerCamelCase : Optional[Any] = ["""<details>""", """<summary>Show updated benchmarks!</summary>""", """ """] for benchmark_name in sorted(lowerCamelCase ): lowerCamelCase : List[Any] = results[benchmark_name] lowerCamelCase : Union[str, Any] = benchmark_name.split("""/""" )[-1] output_md.append(F'''### Benchmark: {benchmark_file_name}''' ) lowerCamelCase : Any = """| metric |""" lowerCamelCase : str = """|--------|""" lowerCamelCase : List[Any] = """| new / old (diff) |""" for metric_name in sorted(lowerCamelCase ): lowerCamelCase : List[Any] = benchmark_res[metric_name] lowerCamelCase : Tuple = metric_vals["""new"""] lowerCamelCase : int = metric_vals.get("""old""", lowerCamelCase ) lowerCamelCase : Dict = metric_vals.get("""diff""", lowerCamelCase ) lowerCamelCase : Dict = F''' {new_val:f}''' if isinstance(lowerCamelCase, (int, float) ) else """None""" if old_val is not None: val_str += F''' / {old_val:f}''' if isinstance(lowerCamelCase, (int, float) ) else "None" if dif_val is not None: val_str += F''' ({dif_val:f})''' if isinstance(lowerCamelCase, (int, float) ) else "None" title += " " + metric_name + " |" lines += "---|" value += val_str + " |" output_md += [title, lines, value, " "] output_md.append("""</details>""" ) with open(lowerCamelCase, """w""", encoding="""utf-8""" ) as f: f.writelines("""\n""".join(lowerCamelCase ) ) if __name__ == "__main__": _lowerCamelCase =sys.argv[1] _lowerCamelCase =sys.argv[2] format_json_to_md(input_json_file, output_md_file)
287
0
"""Version-comparison helpers built on top of ``packaging.version``."""

import importlib.metadata
from typing import Union

from packaging.version import Version, parse

from .constants import STR_OPERATION_TO_FUNC

# Parsed version of the installed torch package, used by `is_torch_version`.
torch_version = parse(importlib.metadata.version('torch'))


def compare_versions(library_or_version: Union[str, Version], operation: str, requirement_version: str):
    """Compare a library's version against a requirement.

    Args:
        library_or_version: A library name (its installed version is looked up
            via ``importlib.metadata``) or an already-parsed ``Version``.
        operation: A comparison operator key of ``STR_OPERATION_TO_FUNC``
            (e.g. ``">="``).
        requirement_version: The version string to compare against.

    Returns:
        The boolean result of the requested comparison.

    Raises:
        ValueError: If ``operation`` is not a supported comparison.
    """
    if operation not in STR_OPERATION_TO_FUNC.keys():
        raise ValueError(F"`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys() )}, received {operation}")
    operation_func = STR_OPERATION_TO_FUNC[operation]
    if isinstance(library_or_version, str):
        library_or_version = parse(importlib.metadata.version(library_or_version))
    return operation_func(library_or_version, parse(requirement_version))


def is_torch_version(operation: str, version: str):
    """Compare the installed torch version with `version` using `operation`."""
    return compare_versions(torch_version, operation, version)


# Backward-compatible aliases for the previous (obfuscated) names.
_UpperCamelCase = torch_version
__UpperCAmelCase = is_torch_version
304
def _a ( lowerCamelCase ): return " ".join( """""".join(word[::-1] ) if len(lowerCamelCase ) > 4 else word for word in sentence.split() ) if __name__ == "__main__": import doctest doctest.testmod() print(reverse_long_words("""Hey wollef sroirraw"""))
287
0
"""Unit tests for the DEIS multistep scheduler."""

import tempfile

import torch

from diffusers import (
    DEISMultistepScheduler,
    DPMSolverMultistepScheduler,
    DPMSolverSinglestepScheduler,
    UniPCMultistepScheduler,
)

from .test_schedulers import SchedulerCommonTest


class DEISMultistepSchedulerTest(SchedulerCommonTest):
    """Exercises DEISMultistepScheduler config round-trips and full sampling loops."""

    scheduler_classes = (DEISMultistepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)

    def get_scheduler_config(self, **kwargs):
        """Return a default scheduler config, overridable via ``kwargs``."""
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
        }
        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        """Check that save_config/from_pretrained preserves stepping behavior."""
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(output, t, sample, **kwargs).prev_sample
                new_output = new_scheduler.step(new_output, t, sample, **kwargs).prev_sample

                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        # Covered by check_over_configs / check_over_forward for this scheduler.
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        """Check stepping equivalence after a save/load round-trip for forward kwargs."""
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, scheduler=None, **config):
        """Run a 10-step denoising loop and return the final sample."""
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        # NOTE(review): mirrors the original control flow — the scheduler is
        # re-created from config unconditionally; confirm this is intended.
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_switch(self):
        # make sure that iterating over schedulers with same config names gives same results
        # for defaults
        scheduler = DEISMultistepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.23916) < 1e-3

        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
        scheduler = DEISMultistepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.23916) < 1e-3

    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["logrho"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True,
                            prediction_type=prediction_type,
                            sample_max_value=threshold,
                            algorithm_type='deis',
                            solver_order=order,
                            solver_type=solver_type,
                        )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_solver_order_and_type(self):
        for algorithm_type in ["deis"]:
            for solver_type in ["logrho"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        sample = self.full_loop(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        assert not torch.isnan(sample).any(), "Samples have nan numbers"

    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)

    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.23916) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type='v_prediction')
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.091) < 1e-3

    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        # fp16 input must stay fp16 through the whole loop
        assert sample.dtype == torch.float16
185
# Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from packaging import version from .. import __version__ from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD from .doc import ( add_code_sample_docstrings, add_end_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, copy_func, replace_return_docstrings, ) from .generic import ( ContextManagers, ExplicitEnum, ModelOutput, PaddingStrategy, TensorType, add_model_info_to_auto_map, cached_property, can_return_loss, expand_dims, find_labels, flatten_dict, infer_framework, is_jax_tensor, is_numpy_array, is_tensor, is_tf_symbolic_tensor, is_tf_tensor, is_torch_device, is_torch_dtype, is_torch_tensor, reshape, squeeze, strtobool, tensor_size, to_numpy, to_py_obj, transpose, working_or_temp_dir, ) from .hub import ( CLOUDFRONT_DISTRIB_PREFIX, DISABLE_TELEMETRY, HF_MODULES_CACHE, HUGGINGFACE_CO_PREFIX, HUGGINGFACE_CO_RESOLVE_ENDPOINT, PYTORCH_PRETRAINED_BERT_CACHE, PYTORCH_TRANSFORMERS_CACHE, S3_BUCKET_PREFIX, TRANSFORMERS_CACHE, TRANSFORMERS_DYNAMIC_MODULE_NAME, EntryNotFoundError, PushToHubMixin, RepositoryNotFoundError, RevisionNotFoundError, cached_file, default_cache_path, define_sagemaker_information, download_url, extract_commit_hash, get_cached_models, get_file_from_repo, get_full_repo_name, has_file, http_user_agent, is_offline_mode, is_remote_url, move_cache, send_example_telemetry, 
try_to_load_from_cache, ) from .import_utils import ( ENV_VARS_TRUE_AND_AUTO_VALUES, ENV_VARS_TRUE_VALUES, TORCH_FX_REQUIRED_VERSION, USE_JAX, USE_TF, USE_TORCH, DummyObject, OptionalDependencyNotAvailable, _LazyModule, ccl_version, direct_transformers_import, get_torch_version, is_accelerate_available, is_apex_available, is_bitsandbytes_available, is_bsa_available, is_coloredlogs_available, is_cython_available, is_datasets_available, is_decord_available, is_detectrona_available, is_faiss_available, is_flax_available, is_ftfy_available, is_in_notebook, is_ipex_available, is_jieba_available, is_jumanpp_available, is_kenlm_available, is_keras_nlp_available, is_librosa_available, is_natten_available, is_ninja_available, is_onnx_available, is_openai_available, is_optimum_available, is_pandas_available, is_peft_available, is_phonemizer_available, is_protobuf_available, is_psutil_available, is_pyanvml_available, is_pyctcdecode_available, is_pytesseract_available, is_pytest_available, is_pytorch_quantization_available, is_rjieba_available, is_sacremoses_available, is_safetensors_available, is_sagemaker_dp_enabled, is_sagemaker_mp_enabled, is_scipy_available, is_sentencepiece_available, is_seqio_available, is_sklearn_available, is_soundfile_availble, is_spacy_available, is_speech_available, is_sudachi_available, is_tensorflow_probability_available, is_tensorflow_text_available, is_tfaonnx_available, is_tf_available, is_timm_available, is_tokenizers_available, is_torch_available, is_torch_bfaa_available, is_torch_bfaa_cpu_available, is_torch_bfaa_gpu_available, is_torch_compile_available, is_torch_cuda_available, is_torch_fx_available, is_torch_fx_proxy, is_torch_mps_available, is_torch_neuroncore_available, is_torch_tensorrt_fx_available, is_torch_tfaa_available, is_torch_tpu_available, is_torchaudio_available, is_torchdistx_available, is_torchdynamo_available, is_torchvision_available, is_training_run_on_sagemaker, is_vision_available, requires_backends, 
torch_only_method, ) _lowerCamelCase ="""pytorch_model.bin""" _lowerCamelCase ="""pytorch_model.bin.index.json""" _lowerCamelCase ="""adapter_config.json""" _lowerCamelCase ="""adapter_model.bin""" _lowerCamelCase ="""adapter_model.safetensors""" _lowerCamelCase ="""tf_model.h5""" _lowerCamelCase ="""tf_model.h5.index.json""" _lowerCamelCase ="""model.ckpt""" _lowerCamelCase ="""flax_model.msgpack""" _lowerCamelCase ="""flax_model.msgpack.index.json""" _lowerCamelCase ="""model.safetensors""" _lowerCamelCase ="""model.safetensors.index.json""" _lowerCamelCase ="""config.json""" _lowerCamelCase ="""preprocessor_config.json""" _lowerCamelCase =FEATURE_EXTRACTOR_NAME _lowerCamelCase ="""generation_config.json""" _lowerCamelCase ="""modelcard.json""" _lowerCamelCase ="""▁""" _lowerCamelCase =SENTENCEPIECE_UNDERLINE # Kept for backward compatibility _lowerCamelCase =[ [[0, 1, 0, 1], [1, 0, 0, 1]] ] * 2 # Needs to have 0s and 1s only since XLM uses it for langs too. _lowerCamelCase =[[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]] _lowerCamelCase =[[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]] def _a ( lowerCamelCase ): if version.parse(lowerCamelCase ) < version.parse(lowerCamelCase ): if "dev" in min_version: lowerCamelCase : Optional[int] = ( """This example requires a source install from HuggingFace Transformers (see """ """`https://huggingface.co/docs/transformers/installation#install-from-source`),""" ) else: lowerCamelCase : int = F'''This example requires a minimum version of {min_version},''' error_message += F''' but the version found is {__version__}.\n''' raise ImportError( error_message + """Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other """ """versions of HuggingFace Transformers.""" )
287
0
"""simple docstring""" import json import os from pathlib import Path import pytest from datasets.download.download_config import DownloadConfig from datasets.download.download_manager import DownloadManager from datasets.utils.file_utils import hash_url_to_filename UpperCAmelCase__ = 'http://www.mocksite.com/file1.txt' UpperCAmelCase__ = '"text": ["foo", "foo"]' UpperCAmelCase__ = '6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8' class lowerCAmelCase__ : __a = 200 __a = {"""Content-Length""": """100"""} __a = {} def lowercase ( self : List[str] , **_lowerCamelCase : List[str] ): return [bytes(_lowerCamelCase , '''utf-8''' )] def _UpperCAmelCase ( *__lowerCamelCase : List[str] , **__lowerCamelCase : Dict ) -> Dict: return MockResponse() @pytest.mark.parametrize('''urls_type''' , [str, list, dict] ) def _UpperCAmelCase ( __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[str] , __lowerCamelCase : str ) -> int: import requests monkeypatch.setattr(__lowerCamelCase , '''request''' , __lowerCamelCase ) _snake_case = URL if issubclass(__lowerCamelCase , __lowerCamelCase ): _snake_case = url elif issubclass(__lowerCamelCase , __lowerCamelCase ): _snake_case = [url] elif issubclass(__lowerCamelCase , __lowerCamelCase ): _snake_case = {'''train''': url} _snake_case = '''dummy''' _snake_case = '''downloads''' _snake_case = tmp_path _snake_case = DownloadConfig( cache_dir=os.path.join(__lowerCamelCase , __lowerCamelCase ) , use_etag=__lowerCamelCase , ) _snake_case = DownloadManager(dataset_name=__lowerCamelCase , download_config=__lowerCamelCase ) _snake_case = dl_manager.download(__lowerCamelCase ) _snake_case = urls for downloaded_paths in [downloaded_paths]: if isinstance(__lowerCamelCase , __lowerCamelCase ): _snake_case = [downloaded_paths] _snake_case = [urls] elif isinstance(__lowerCamelCase , __lowerCamelCase ): assert "train" in downloaded_paths.keys() _snake_case = downloaded_paths.values() _snake_case = urls.values() assert 
downloaded_paths for downloaded_path, input_url in zip(__lowerCamelCase , __lowerCamelCase ): assert downloaded_path == dl_manager.downloaded_paths[input_url] _snake_case = Path(__lowerCamelCase ) _snake_case = downloaded_path.parts assert parts[-1] == HASH assert parts[-2] == cache_subdir assert downloaded_path.exists() _snake_case = downloaded_path.read_text() assert content == CONTENT _snake_case = downloaded_path.with_suffix('''.json''' ) assert metadata_downloaded_path.exists() _snake_case = json.loads(metadata_downloaded_path.read_text() ) assert metadata_content == {"url": URL, "etag": None} @pytest.mark.parametrize('''paths_type''' , [str, list, dict] ) def _UpperCAmelCase ( __lowerCamelCase : int , __lowerCamelCase : str , __lowerCamelCase : Optional[int] ) -> int: _snake_case = str(__lowerCamelCase ) if issubclass(__lowerCamelCase , __lowerCamelCase ): _snake_case = filename elif issubclass(__lowerCamelCase , __lowerCamelCase ): _snake_case = [filename] elif issubclass(__lowerCamelCase , __lowerCamelCase ): _snake_case = {'''train''': filename} _snake_case = '''dummy''' _snake_case = xz_file.parent _snake_case = '''extracted''' _snake_case = DownloadConfig( cache_dir=__lowerCamelCase , use_etag=__lowerCamelCase , ) _snake_case = DownloadManager(dataset_name=__lowerCamelCase , download_config=__lowerCamelCase ) _snake_case = dl_manager.extract(__lowerCamelCase ) _snake_case = paths for extracted_paths in [extracted_paths]: if isinstance(__lowerCamelCase , __lowerCamelCase ): _snake_case = [extracted_paths] _snake_case = [paths] elif isinstance(__lowerCamelCase , __lowerCamelCase ): assert "train" in extracted_paths.keys() _snake_case = extracted_paths.values() _snake_case = paths.values() assert extracted_paths for extracted_path, input_path in zip(__lowerCamelCase , __lowerCamelCase ): assert extracted_path == dl_manager.extracted_paths[input_path] _snake_case = Path(__lowerCamelCase ) _snake_case = extracted_path.parts assert parts[-1] == 
hash_url_to_filename(__lowerCamelCase , etag=__lowerCamelCase ) assert parts[-2] == extracted_subdir assert extracted_path.exists() _snake_case = extracted_path.read_text() _snake_case = text_file.read_text() assert extracted_file_content == expected_file_content def _UpperCAmelCase ( __lowerCamelCase : Tuple , __lowerCamelCase : List[Any] ) -> Dict: assert path.endswith('''.jsonl''' ) for num_items, line in enumerate(__lowerCamelCase , start=1 ): _snake_case = json.loads(line.decode('''utf-8''' ) ) assert item.keys() == {"col_1", "col_2", "col_3"} assert num_items == 4 @pytest.mark.parametrize('''archive_jsonl''' , ['''tar_jsonl_path''', '''zip_jsonl_path'''] ) def _UpperCAmelCase ( __lowerCamelCase : Dict , __lowerCamelCase : str ) -> Dict: _snake_case = request.getfixturevalue(__lowerCamelCase ) _snake_case = DownloadManager() for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(__lowerCamelCase ) , start=1 ): _test_jsonl(__lowerCamelCase , __lowerCamelCase ) assert num_jsonl == 2 @pytest.mark.parametrize('''archive_nested_jsonl''' , ['''tar_nested_jsonl_path''', '''zip_nested_jsonl_path'''] ) def _UpperCAmelCase ( __lowerCamelCase : str , __lowerCamelCase : List[Any] ) -> Tuple: _snake_case = request.getfixturevalue(__lowerCamelCase ) _snake_case = DownloadManager() for num_tar, (path, file) in enumerate(dl_manager.iter_archive(__lowerCamelCase ) , start=1 ): for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(__lowerCamelCase ) , start=1 ): _test_jsonl(__lowerCamelCase , __lowerCamelCase ) assert num_tar == 1 assert num_jsonl == 2 def _UpperCAmelCase ( __lowerCamelCase : Tuple ) -> List[Any]: _snake_case = DownloadManager() for num_file, file in enumerate(dl_manager.iter_files(__lowerCamelCase ) , start=1 ): assert os.path.basename(__lowerCamelCase ) == ("test.txt" if num_file == 1 else "train.txt") assert num_file == 2
288
"""simple docstring""" from __future__ import annotations import inspect import unittest import numpy as np from transformers import ResNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFResNetForImageClassification, TFResNetModel from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class lowerCAmelCase__ : def __init__( self : Dict , _lowerCamelCase : int , _lowerCamelCase : Optional[int]=3 , _lowerCamelCase : List[str]=32 , _lowerCamelCase : Optional[int]=3 , _lowerCamelCase : Dict=10 , _lowerCamelCase : Tuple=[10, 20, 30, 40] , _lowerCamelCase : int=[1, 1, 2, 1] , _lowerCamelCase : int=True , _lowerCamelCase : Optional[int]=True , _lowerCamelCase : Optional[int]="relu" , _lowerCamelCase : List[Any]=3 , _lowerCamelCase : Dict=None , ): _snake_case = parent _snake_case = batch_size _snake_case = image_size _snake_case = num_channels _snake_case = embeddings_size _snake_case = hidden_sizes _snake_case = depths _snake_case = is_training _snake_case = use_labels _snake_case = hidden_act _snake_case = num_labels _snake_case = scope _snake_case = len(_lowerCamelCase ) def lowercase ( self : Optional[int] ): _snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _snake_case = None if self.use_labels: _snake_case = ids_tensor([self.batch_size] , self.num_labels ) _snake_case = self.get_config() return config, pixel_values, labels def lowercase ( self : Tuple ): return ResNetConfig( num_channels=self.num_channels , 
embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , ) def lowercase ( self : List[Any] , _lowerCamelCase : List[str] , _lowerCamelCase : str , _lowerCamelCase : List[Any] ): _snake_case = TFResNetModel(config=_lowerCamelCase ) _snake_case = model(_lowerCamelCase ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def lowercase ( self : Dict , _lowerCamelCase : str , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Tuple ): _snake_case = self.num_labels _snake_case = TFResNetForImageClassification(_lowerCamelCase ) _snake_case = model(_lowerCamelCase , labels=_lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowercase ( self : Tuple ): _snake_case = self.prepare_config_and_inputs() _snake_case , _snake_case , _snake_case = config_and_inputs _snake_case = {'''pixel_values''': pixel_values} return config, inputs_dict @require_tf class lowerCAmelCase__ ( A_ , A_ , unittest.TestCase ): __a = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else () __a = ( {"""feature-extraction""": TFResNetModel, """image-classification""": TFResNetForImageClassification} if is_tf_available() else {} ) __a = False __a = False __a = False __a = False __a = False def lowercase ( self : List[Any] ): _snake_case = TFResNetModelTester(self ) _snake_case = ConfigTester(self , config_class=_lowerCamelCase , has_text_modality=_lowerCamelCase ) def lowercase ( self : Tuple ): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() 
self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def lowercase ( self : List[Any] ): return @unittest.skip(reason='''ResNet does not use inputs_embeds''' ) def lowercase ( self : Any ): pass @unittest.skip(reason='''ResNet does not support input and output embeddings''' ) def lowercase ( self : List[str] ): pass def lowercase ( self : int ): _snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _snake_case = model_class(_lowerCamelCase ) _snake_case = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _snake_case = [*signature.parameters.keys()] _snake_case = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , _lowerCamelCase ) def lowercase ( self : List[str] ): _snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowerCamelCase ) def lowercase ( self : Union[str, Any] ): def check_hidden_states_output(_lowerCamelCase : int , _lowerCamelCase : List[Any] , _lowerCamelCase : str ): _snake_case = model_class(_lowerCamelCase ) _snake_case = model(**self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) ) _snake_case = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states _snake_case = self.model_tester.num_stages self.assertEqual(len(_lowerCamelCase ) , expected_num_stages + 1 ) # ResNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) _snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common() _snake_case = ['''basic''', '''bottleneck'''] for model_class in self.all_model_classes: for layer_type in layers_type: _snake_case = layer_type _snake_case = 
True check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _snake_case = True check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) def lowercase ( self : Union[str, Any] ): _snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_lowerCamelCase ) @slow def lowercase ( self : List[str] ): for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _snake_case = TFResNetModel.from_pretrained(_lowerCamelCase ) self.assertIsNotNone(_lowerCamelCase ) def _UpperCAmelCase ( ) -> Union[str, Any]: _snake_case = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_tf @require_vision class lowerCAmelCase__ ( unittest.TestCase ): @cached_property def lowercase ( self : Dict ): return ( AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def lowercase ( self : List[Any] ): _snake_case = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) _snake_case = self.default_image_processor _snake_case = prepare_img() _snake_case = image_processor(images=_lowerCamelCase , return_tensors='''tf''' ) # forward pass _snake_case = model(**_lowerCamelCase ) # verify the logits _snake_case = tf.TensorShape((1, 1000) ) self.assertEqual(outputs.logits.shape , _lowerCamelCase ) _snake_case = tf.constant([-1_1.1_0_6_9, -9.7_8_7_7, -8.3_7_7_7] ) self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , _lowerCamelCase , atol=1e-4 ) )
288
1
"""simple docstring"""
# Slow, GPU-only DeepSpeed integration tests for the wav2vec2 `run_asr.py`
# research-project script: each sub-test launches the script in a subprocess
# under the `deepspeed` launcher with a zero2/zero3 config and checks that the
# process does not fail.
#
# NOTE(review): this file appears machine-mangled — distinct module constants
# were all renamed to `UpperCAmelCase__`, distinct locals to `_snake_case`, and
# distinct parameters to `_lowerCamelCase` (several `def` headers therefore
# repeat a parameter name, which is a SyntaxError).  Later references such as
# `git_repo_path`, `models`, `stages`, `params`, `func`, `param_based_name`,
# `model`, `output_dir`, `model_name`, `eval_steps`, `fpaa`, `args`, `stage`,
# `launcher`, `script`, `ds_args`, `distributed`, `num_gpus` and the base
# class `A_` presumably point at those renamed bindings — TODO restore the
# original names.  Code tokens below are left untouched; only comments added.

# XXX: we want transformers master here - in the absense of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path

# Repo `src` directory, three levels up from this test file.
UpperCAmelCase__ = Path(__file__).resolve().parents[3] / 'src'
# NOTE(review): `git_repo_path` should be the Path computed just above — confirm.
sys.path.insert(1, str(git_repo_path))

import dataclasses  # noqa
import io  # noqa
import itertools  # noqa
import json  # noqa
import os  # noqa
import unittest  # noqa
from copy import deepcopy  # noqa
from parameterized import parameterized  # noqa
from transformers import TrainingArguments, is_torch_available  # noqa
from transformers.deepspeed import is_deepspeed_available  # noqa
from transformers.file_utils import WEIGHTS_NAME  # noqa
from transformers.testing_utils import (  # noqa
    CaptureLogger,
    ExtendSysPath,
    TestCasePlus,
    execute_subprocess_async,
    get_gpu_count,
    mockenv_context,
    require_deepspeed,
    require_torch_gpu,
    require_torch_multi_gpu,
    slow,
)
from transformers.trainer_utils import set_seed  # noqa

set_seed(42)

# Tiny wav2vec2 checkpoints, used so the subprocess runs stay fast.
UpperCAmelCase__ = {'base': 'patrickvonplaten/wav2vec2_tiny_random', 'robust': 'patrickvonplaten/wav2vec2_tiny_random_robust'}

# DeepSpeed ZeRO stage tags.
UpperCAmelCase__ = 'zero2'
UpperCAmelCase__ = 'zero3'
UpperCAmelCase__ = [ZEROa, ZEROa]


def _UpperCAmelCase ( __lowerCamelCase : Optional[int] , __lowerCamelCase : List[str] , __lowerCamelCase : str ) -> int:
    # Custom sub-test name generator for `parameterized`: join all params
    # (stage AND model) into the sub-test name instead of only the first one.
    # customize the test name generator function as we want both params to appear in the sub-test
    # name, as by default it shows only the first param
    _snake_case = parameterized.to_safe_name('''_'''.join(str(__lowerCamelCase ) for x in param.args ) )
    return f'''{func.__name__}_{param_based_name}'''


# Cartesian-product of zero stages with models to test
UpperCAmelCase__ = list(itertools.product(stages, models.keys()))


@slow
@require_deepspeed
@require_torch_gpu
class lowerCAmelCase__ ( A_ ):
    # One (stage, model) sub-test per params entry: single-GPU, fp32 run.
    @parameterized.expand(_lowerCamelCase , name_func=_lowerCamelCase )
    def lowercase ( self : Optional[int] , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : List[str] ):
        self.run_and_check(
            stage=_lowerCamelCase , model=_lowerCamelCase , distributed=_lowerCamelCase , fpaa=_lowerCamelCase , )

    # Multi-GPU, fp32 variant.
    @require_torch_multi_gpu
    @parameterized.expand(_lowerCamelCase , name_func=_lowerCamelCase )
    def lowercase ( self : Optional[Any] , _lowerCamelCase : Tuple , _lowerCamelCase : Union[str, Any] ):
        self.run_and_check(
            stage=_lowerCamelCase , model=_lowerCamelCase , distributed=_lowerCamelCase , fpaa=_lowerCamelCase , )

    # Single-GPU, fp16 variant.
    @parameterized.expand(_lowerCamelCase , name_func=_lowerCamelCase )
    def lowercase ( self : Optional[int] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : List[Any] ):
        self.run_and_check(
            stage=_lowerCamelCase , model=_lowerCamelCase , distributed=_lowerCamelCase , fpaa=_lowerCamelCase , )

    # Multi-GPU, fp16 variant.
    @require_torch_multi_gpu
    @parameterized.expand(_lowerCamelCase , name_func=_lowerCamelCase )
    def lowercase ( self : str , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Any ):
        self.run_and_check(
            stage=_lowerCamelCase , model=_lowerCamelCase , distributed=_lowerCamelCase , fpaa=_lowerCamelCase , )

    def lowercase ( self : int , _lowerCamelCase : int ):
        # do_checks: intentionally a no-op for now.
        # XXX: run_asr is premature and doesn't save any results
        # so all we check for now is that the process didn't fail
        pass

    def lowercase ( self : List[Any] , _lowerCamelCase : str , _lowerCamelCase : str , _lowerCamelCase : int = 10 , _lowerCamelCase : bool = True , _lowerCamelCase : bool = True , _lowerCamelCase : bool = True , ):
        # run_and_check: resolve the model key to a checkpoint id, run the
        # trainer subprocess once, then delegate validation to do_checks.
        _snake_case = models[model]
        _snake_case = self.run_trainer(
            stage=_lowerCamelCase , model_name=_lowerCamelCase , eval_steps=_lowerCamelCase , num_train_epochs=1 , distributed=_lowerCamelCase , fpaa=_lowerCamelCase , )
        self.do_checks(_lowerCamelCase )
        return output_dir

    def lowercase ( self : Tuple , _lowerCamelCase : str , _lowerCamelCase : str , _lowerCamelCase : int = 10 , _lowerCamelCase : int = 1 , _lowerCamelCase : bool = True , _lowerCamelCase : bool = True , ):
        # run_trainer: assemble the full `deepspeed ... run_asr.py <args>`
        # command line and execute it synchronously; returns the output dir.
        _snake_case = self.get_auto_remove_tmp_dir('''./xxx''' , after=_lowerCamelCase )
        # Whitespace-separated CLI args; `.split()` normalizes the layout.
        _snake_case = f'''
            --model_name_or_path {model_name}
            --dataset_name hf-internal-testing/librispeech_asr_dummy
            --dataset_config_name clean
            --train_split_name validation
            --validation_split_name validation
            --output_dir {output_dir}
            --num_train_epochs {str(_lowerCamelCase )}
            --per_device_train_batch_size 2
            --per_device_eval_batch_size 2
            --evaluation_strategy steps
            --learning_rate 5e-4
            --warmup_steps 8
            --orthography timit
            --preprocessing_num_workers 1
            --group_by_length
            --freeze_feature_extractor
            --report_to none
            --save_steps 0
            --eval_steps {eval_steps}
            --report_to none
        '''.split()
        if fpaa:
            args.extend(['''--fp16'''] )
        # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
        # hence the separate config files
        _snake_case = f'''--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json'''.split()
        _snake_case = [f'''{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py''']
        _snake_case = self.get_launcher(_lowerCamelCase )
        # Final command: launcher + script path + script args + deepspeed args.
        _snake_case = launcher + script + args + ds_args
        # keep for quick debug
        # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
        execute_subprocess_async(_lowerCamelCase , env=self.get_env() )
        return output_dir

    def lowercase ( self : List[str] , _lowerCamelCase : str=False ):
        # get_launcher: build the `deepspeed` launcher prefix.
        # 1. explicitly set --num_nodes=1 just in case these tests end up run on a multi-node setup
        # - it won't be able to handle that
        # 2. for now testing with just 2 gpus max (since some quality tests may give different
        # results with mode gpus because we use very little data)
        _snake_case = min(2 , get_gpu_count() ) if distributed else 1
        return f'''deepspeed --num_nodes 1 --num_gpus {num_gpus}'''.split()
288
"""Create the super-tiny FSMT model ``stas/tiny-wmt19-en-ru`` used in tests."""
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-super-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"

from pathlib import Path
import json
import tempfile

from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES

# Name under which the tiny model/tokenizer are saved locally.
mname_tiny = "tiny-wmt19-en-ru"

# Build

# Tiny BPE vocab/merges, borrowed from a tokenizer test fixture.
vocab = [
    "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
    "w</w>", "r</w>", "t</w>", "lo", "low", "er</w>",
    "low</w>", "lowest</w>", "newer</w>", "wider</w>", "<unk>",
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

with tempfile.TemporaryDirectory() as tmpdirname:
    build_dir = Path(tmpdirname)
    src_vocab_file = build_dir / VOCAB_FILES_NAMES["src_vocab_file"]
    tgt_vocab_file = build_dir / VOCAB_FILES_NAMES["tgt_vocab_file"]
    merges_file = build_dir / VOCAB_FILES_NAMES["merges_file"]
    with open(src_vocab_file, "w") as fp:
        fp.write(json.dumps(vocab_tokens))
    with open(tgt_vocab_file, "w") as fp:
        fp.write(json.dumps(vocab_tokens))
    with open(merges_file, "w") as fp:
        fp.write("\n".join(merges))

    # Tokenizer must be built while the temp vocab files still exist.
    tokenizer = FSMTTokenizer(
        langs=["en", "ru"],
        src_vocab_size=len(vocab),
        tgt_vocab_size=len(vocab),
        src_vocab_file=src_vocab_file,
        tgt_vocab_file=tgt_vocab_file,
        merges_file=merges_file,
    )

# Minimal dims (1 layer, d_model=4) so all saved files stay ~60KB.
config = FSMTConfig(
    langs=["ru", "en"],
    src_vocab_size=1000,
    tgt_vocab_size=1000,
    d_model=4,
    encoder_layers=1,
    decoder_layers=1,
    encoder_ffn_dim=4,
    decoder_ffn_dim=4,
    encoder_attention_heads=1,
    decoder_attention_heads=1,
)

tiny_model = FSMTForConditionalGeneration(config)
print(f"num of params {tiny_model.num_parameters()}")

# Test: a forward pass must succeed end-to-end.
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)

print("test output:", len(outputs.logits[0]))

# Save
tiny_model.half()  # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)

print(f"Generated {mname_tiny}")

# Upload
# transformers-cli upload tiny-wmt19-en-ru
288
1
"""simple docstring"""
# SentencePiece-based tokenizer for BARTpho (vinai/bartpho-syllable): a full
# sentencepiece model plus a reduced "monolingual" fairseq-style vocab file.
#
# NOTE(review): this file appears machine-mangled — distinct constants were
# renamed to `UpperCAmelCase__`, locals to `_snake_case`, parameters to
# `_lowerCamelCase` (several `def` headers repeat a parameter name and one
# `super()` call repeats a keyword, both SyntaxErrors).  References such as
# `mask_token`, `sp_model_kwargs`, `vocab_file`, `monolingual_vocab_file`,
# `cnt`, `cls`, `sep`, `token_ids_a`, `save_directory`, `filename_prefix`,
# `out_vocab_file`, `out_monolingual_vocab_file` and the base class `A_`
# presumably point at those renamed bindings — TODO restore the real names.
# Code tokens below are untouched; only comments/docstrings were added.
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging

UpperCAmelCase__ = logging.get_logger(__name__)

# SentencePiece word-boundary marker.
UpperCAmelCase__ = '▁'

# Names of the two vocab files this tokenizer reads/writes.
UpperCAmelCase__ = {'vocab_file': 'sentencepiece.bpe.model', 'monolingual_vocab_file': 'dict.txt'}

# Download URLs for the pretrained checkpoint's vocab files.
UpperCAmelCase__ = {
    'vocab_file': {
        'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model',
    },
    'monolingual_vocab_file': {
        'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt',
    },
}

# Max model input length per checkpoint.
UpperCAmelCase__ = {'vinai/bartpho-syllable': 1024}


class lowerCAmelCase__ ( A_ ):
    # Class-level tokenizer metadata expected by PreTrainedTokenizer.
    __a = VOCAB_FILES_NAMES
    __a = PRETRAINED_VOCAB_FILES_MAP
    __a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __a = ["""input_ids""", """attention_mask"""]

    def __init__( self : List[str] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : int , _lowerCamelCase : Dict="<s>" , _lowerCamelCase : List[Any]="</s>" , _lowerCamelCase : str="</s>" , _lowerCamelCase : Dict="<s>" , _lowerCamelCase : Optional[Any]="<unk>" , _lowerCamelCase : Union[str, Any]="<pad>" , _lowerCamelCase : Optional[Any]="<mask>" , _lowerCamelCase : Optional[Dict[str, Any]] = None , **_lowerCamelCase : Dict , ):
        """Load the sentencepiece model and build the reduced fairseq vocab maps."""
        # Mask token behave like a normal word, i.e. include the space before it
        _snake_case = AddedToken(_lowerCamelCase , lstrip=_lowerCamelCase , rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase , _lowerCamelCase ) else mask_token

        _snake_case = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=_lowerCamelCase , eos_token=_lowerCamelCase , unk_token=_lowerCamelCase , sep_token=_lowerCamelCase , cls_token=_lowerCamelCase , pad_token=_lowerCamelCase , mask_token=_lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **_lowerCamelCase , )

        _snake_case = vocab_file
        _snake_case = monolingual_vocab_file
        _snake_case = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(_lowerCamelCase ) )

        # Load the reduced vocab

        # Keep order of special tokens for backward compatibility
        _snake_case = {}
        _snake_case = 0
        for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
            if str(_lowerCamelCase ) not in self.fairseq_tokens_to_ids:
                _snake_case = cnt
                cnt += 1
        # One token per line; first whitespace-separated field is the token.
        with open(_lowerCamelCase , '''r''' , encoding='''utf-8''' ) as f:
            for line in f.readlines():
                _snake_case = line.strip().split()[0]
                _snake_case = len(self.fairseq_tokens_to_ids )
        if str(_lowerCamelCase ) not in self.fairseq_tokens_to_ids:
            _snake_case = len(self.fairseq_tokens_to_ids )

        # Reverse map: id -> token.
        _snake_case = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__( self : Union[str, Any] ):
        # Pickle support: the SentencePieceProcessor itself is not picklable,
        # so serialize its proto and drop the live object.
        _snake_case = self.__dict__.copy()
        _snake_case = None
        _snake_case = self.sp_model.serialized_model_proto()
        return state

    def __setstate__( self : List[str] , _lowerCamelCase : Union[str, Any] ):
        # Rebuild the SentencePieceProcessor from the serialized proto.
        _snake_case = d

        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs''' ):
            _snake_case = {}

        _snake_case = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto )

    def lowercase ( self : Optional[Any] , _lowerCamelCase : List[int] , _lowerCamelCase : Optional[List[int]] = None ):
        """Build model inputs with special tokens: <s> A </s> or <s> A </s></s> B </s>."""
        if token_ids_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        _snake_case = [self.cls_token_id]
        _snake_case = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a + sep

    def lowercase ( self : int , _lowerCamelCase : List[int] , _lowerCamelCase : Optional[List[int]] = None , _lowerCamelCase : bool = False ):
        """Return a mask with 1 for special tokens and 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=_lowerCamelCase , token_ids_a=_lowerCamelCase , already_has_special_tokens=_lowerCamelCase )

        if token_ids_a is None:
            return [1] + ([0] * len(_lowerCamelCase )) + [1]
        return [1] + ([0] * len(_lowerCamelCase )) + [1, 1] + ([0] * len(_lowerCamelCase )) + [1]

    def lowercase ( self : Any , _lowerCamelCase : List[int] , _lowerCamelCase : Optional[List[int]] = None ):
        """Token-type ids: BARTpho does not use them, so everything is 0."""
        _snake_case = [self.sep_token_id]
        _snake_case = [self.cls_token_id]

        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]

    @property
    def lowercase ( self : Optional[int] ):
        # vocab_size: size of the reduced fairseq vocab, not the sp model.
        return len(self.fairseq_ids_to_tokens )

    def lowercase ( self : str ):
        # get_vocab: token -> id, including any added tokens.
        _snake_case = {self.convert_ids_to_tokens(_lowerCamelCase ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def lowercase ( self : Any , _lowerCamelCase : str ):
        # _tokenize: delegate to sentencepiece.
        return self.sp_model.encode(_lowerCamelCase , out_type=_lowerCamelCase )

    def lowercase ( self : Tuple , _lowerCamelCase : Optional[Any] ):
        # _convert_token_to_id: unknown tokens map to unk_token_id.
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        else:
            return self.unk_token_id

    def lowercase ( self : List[str] , _lowerCamelCase : Optional[int] ):
        # _convert_id_to_token via the reverse fairseq map.
        return self.fairseq_ids_to_tokens[index]

    def lowercase ( self : str , _lowerCamelCase : Dict ):
        # convert_tokens_to_string: join pieces and turn the sentencepiece
        # word-boundary marker back into spaces.
        _snake_case = ''''''.join(_lowerCamelCase ).replace(_lowerCamelCase , ''' ''' ).strip()
        return out_string

    def lowercase ( self : Optional[int] , _lowerCamelCase : str , _lowerCamelCase : Optional[str] = None ):
        """Save the sp model and the monolingual vocab file into a directory."""
        if not os.path.isdir(_lowerCamelCase ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        _snake_case = os.path.join(
            _lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        _snake_case = os.path.join(
            _lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''monolingual_vocab_file'''] , )

        # Copy the original file when available, otherwise re-serialize.
        if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCamelCase ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , _lowerCamelCase )
        elif not os.path.isfile(self.vocab_file ):
            with open(_lowerCamelCase , '''wb''' ) as fi:
                _snake_case = self.sp_model.serialized_model_proto()
                fi.write(_lowerCamelCase )

        if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
            _lowerCamelCase ) and os.path.isfile(self.monolingual_vocab_file ):
            copyfile(self.monolingual_vocab_file , _lowerCamelCase )
        elif not os.path.isfile(self.monolingual_vocab_file ):
            # Rewrite the reduced vocab, excluding special tokens.
            with open(_lowerCamelCase , '''w''' , encoding='''utf-8''' ) as fp:
                for token in self.fairseq_tokens_to_ids:
                    if token not in self.all_special_tokens:
                        fp.write(f'''{str(_lowerCamelCase )} \n''' )

        return out_vocab_file, out_monolingual_vocab_file
288
"""simple docstring""" def _UpperCAmelCase ( __lowerCamelCase : int = 1_00_00_00 ) -> int: _snake_case = limit + 1 _snake_case = [0] * limit for first_term in range(1 , __lowerCamelCase ): for n in range(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ): _snake_case = first_term + n / first_term if common_difference % 4: # d must be divisble by 4 continue else: common_difference /= 4 if ( first_term > common_difference and first_term < 4 * common_difference ): # since x,y,z are positive integers frequency[n] += 1 # so z>0 and a>d ,also 4d<a _snake_case = sum(1 for x in frequency[1:limit] if x == 10 ) return count if __name__ == "__main__": print(F"{solution() = }")
288
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) UpperCAmelCase__ = {'configuration_encoder_decoder': ['EncoderDecoderConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ = ['EncoderDecoderModel'] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ = ['TFEncoderDecoderModel'] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ = ['FlaxEncoderDecoderModel'] if TYPE_CHECKING: from .configuration_encoder_decoder import EncoderDecoderConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_encoder_decoder import EncoderDecoderModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_encoder_decoder import TFEncoderDecoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel else: import sys UpperCAmelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
288
"""simple docstring""" import math import numpy as np import qiskit from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute def _UpperCAmelCase ( __lowerCamelCase : int = 3 ) -> qiskit.result.counts.Counts: if isinstance(__lowerCamelCase , __lowerCamelCase ): raise TypeError('''number of qubits must be a integer.''' ) if number_of_qubits <= 0: raise ValueError('''number of qubits must be > 0.''' ) if math.floor(__lowerCamelCase ) != number_of_qubits: raise ValueError('''number of qubits must be exact integer.''' ) if number_of_qubits > 10: raise ValueError('''number of qubits too large to simulate(>10).''' ) _snake_case = QuantumRegister(__lowerCamelCase , '''qr''' ) _snake_case = ClassicalRegister(__lowerCamelCase , '''cr''' ) _snake_case = QuantumCircuit(__lowerCamelCase , __lowerCamelCase ) _snake_case = number_of_qubits for i in range(__lowerCamelCase ): quantum_circuit.h(number_of_qubits - i - 1 ) counter -= 1 for j in range(__lowerCamelCase ): quantum_circuit.cp(np.pi / 2 ** (counter - j) , __lowerCamelCase , __lowerCamelCase ) for k in range(number_of_qubits // 2 ): quantum_circuit.swap(__lowerCamelCase , number_of_qubits - k - 1 ) # measure all the qubits quantum_circuit.measure(__lowerCamelCase , __lowerCamelCase ) # simulate with 10000 shots _snake_case = Aer.get_backend('''qasm_simulator''' ) _snake_case = execute(__lowerCamelCase , __lowerCamelCase , shots=1_00_00 ) return job.result().get_counts(__lowerCamelCase ) if __name__ == "__main__": print( F"Total count for quantum fourier transform state is: \ {quantum_fourier_transform(3)}" )
288
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) UpperCAmelCase__ = { 'configuration_swiftformer': [ 'SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'SwiftFormerConfig', 'SwiftFormerOnnxConfig', ] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ = [ 'SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'SwiftFormerForImageClassification', 'SwiftFormerModel', 'SwiftFormerPreTrainedModel', ] if TYPE_CHECKING: from .configuration_swiftformer import ( SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, SwiftFormerConfig, SwiftFormerOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_swiftformer import ( SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, SwiftFormerForImageClassification, SwiftFormerModel, SwiftFormerPreTrainedModel, ) else: import sys UpperCAmelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
288
"""simple docstring"""
# Convert a fairseq BART checkpoint (bart.large / .mnli / .cnn / xsum) into a
# Hugging Face checkpoint, verifying that outputs match before saving.
#
# NOTE(review): this file appears machine-mangled — every top-level helper is
# named `_UpperCAmelCase` (each redefinition shadows the previous one), all
# locals became `_snake_case`, and one `def` header repeats `__lowerCamelCase`
# (a SyntaxError).  References such as `ignore_keys`, `state_dict`, `dct`,
# `val`, `hub_interface`, `sd`, `emb`, `lin_layer`, `bart`, `tokens`,
# `tokensa`, `mnli_rename_keys`, `model`, `fairseq_output`,
# `new_model_outputs`, `parser`, `args`, `remove_ignore_keys_`, `rename_key`,
# `load_xsum_checkpoint`, `make_linear_from_emb`, `convert_bart_checkpoint`
# presumably point at those renamed bindings — TODO restore the real names.
# Code tokens below are untouched; only comments were added.
import argparse
import os
from pathlib import Path

import fairseq
import torch
from packaging import version
from torch import nn

from transformers import (
    BartConfig,
    BartForConditionalGeneration,
    BartForSequenceClassification,
    BartModel,
    BartTokenizer,
)
from transformers.utils import logging

# Known fairseq checkpoint names, and their target HF architectures.
UpperCAmelCase__ = ['bart.large', 'bart.large.mnli', 'bart.large.cnn', 'bart_xsum/model.pt']
UpperCAmelCase__ = {'bart.large': BartModel, 'bart.large.mnli': BartForSequenceClassification}

if version.parse(fairseq.__version__) < version.parse('0.9.0'):
    raise Exception('requires fairseq >= 0.9.0')

logging.set_verbosity_info()
UpperCAmelCase__ = logging.get_logger(__name__)

# Sample sentence used to compare fairseq vs HF tokenization/outputs.
UpperCAmelCase__ = ' Hello world! cécé herlolip'

# fairseq -> HF key renames for the MNLI classification head.
UpperCAmelCase__ = [
    ('model.classification_heads.mnli.dense.weight', 'classification_head.dense.weight'),
    ('model.classification_heads.mnli.dense.bias', 'classification_head.dense.bias'),
    ('model.classification_heads.mnli.out_proj.weight', 'classification_head.out_proj.weight'),
    ('model.classification_heads.mnli.out_proj.bias', 'classification_head.out_proj.bias'),
]


def _UpperCAmelCase ( __lowerCamelCase : Optional[Any] ) -> Optional[int]:
    # remove_ignore_keys_: drop fairseq bookkeeping entries from a state dict
    # in place (second pop arg is a default, so missing keys are tolerated).
    _snake_case = [
        '''encoder.version''',
        '''decoder.version''',
        '''model.encoder.version''',
        '''model.decoder.version''',
        '''_float_tensor''',
    ]
    for k in ignore_keys:
        state_dict.pop(__lowerCamelCase , __lowerCamelCase )


def _UpperCAmelCase ( __lowerCamelCase : List[Any] , __lowerCamelCase : int , __lowerCamelCase : int ) -> int:
    # rename_key: move a state-dict entry from an old key to a new key.
    _snake_case = dct.pop(__lowerCamelCase )
    _snake_case = val


def _UpperCAmelCase ( __lowerCamelCase : Dict ) -> str:
    # load_xsum_checkpoint: load a local model.pt into a fairseq hub interface
    # (the bart.large.cnn hub model supplies the surrounding architecture).
    _snake_case = torch.load(__lowerCamelCase , map_location='''cpu''' )
    _snake_case = torch.hub.load('''pytorch/fairseq''' , '''bart.large.cnn''' ).eval()
    hub_interface.model.load_state_dict(sd['''model'''] )
    return hub_interface


def _UpperCAmelCase ( __lowerCamelCase : Optional[int] ) -> Union[str, Any]:
    # make_linear_from_emb: build a bias-free Linear sharing the embedding's
    # weight, used as an lm_head tied to the input embeddings.
    _snake_case , _snake_case = emb.weight.shape
    _snake_case = nn.Linear(__lowerCamelCase , __lowerCamelCase , bias=__lowerCamelCase )
    _snake_case = emb.weight.data
    return lin_layer


@torch.no_grad()
def _UpperCAmelCase ( __lowerCamelCase : List[Any] , __lowerCamelCase : Tuple , __lowerCamelCase : Union[str, Any]=None ) -> List[Any]:
    # convert_bart_checkpoint(checkpoint_path, pytorch_dump_folder_path,
    # hf_checkpoint_name=None): load the fairseq model (hub name or local
    # xsum model.pt), port its weights into the matching HF class, verify the
    # two models agree on a sample input, then save the HF checkpoint.
    if not os.path.exists(__lowerCamelCase ):
        _snake_case = torch.hub.load('''pytorch/fairseq''' , __lowerCamelCase ).eval()
    else:
        _snake_case = load_xsum_checkpoint(__lowerCamelCase )

    bart.model.upgrade_state_dict(bart.model.state_dict() )
    if hf_checkpoint_name is None:
        # e.g. 'bart.large.mnli' -> 'bart-large-mnli' HF config name.
        _snake_case = checkpoint_path.replace('''.''' , '''-''' )
    _snake_case = BartConfig.from_pretrained(__lowerCamelCase )
    # Sanity check: fairseq and HF tokenizers must agree on the sample text.
    _snake_case = bart.encode(__lowerCamelCase ).unsqueeze(0 )
    _snake_case = BartTokenizer.from_pretrained(__lowerCamelCase ).encode(__lowerCamelCase , return_tensors='''pt''' ).unsqueeze(0 )
    if not torch.eq(__lowerCamelCase , __lowerCamelCase ).all():
        raise ValueError(
            f'''converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokensa}''' )

    if checkpoint_path == "bart.large.mnli":
        # Classification variant: port the backbone plus the MNLI head.
        _snake_case = bart.state_dict()
        remove_ignore_keys_(__lowerCamelCase )
        _snake_case = state_dict['''model.decoder.embed_tokens.weight''']
        for src, dest in mnli_rename_keys:
            rename_key(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
        _snake_case = BartForSequenceClassification(__lowerCamelCase ).eval()
        model.load_state_dict(__lowerCamelCase )
        _snake_case = bart.predict('''mnli''' , __lowerCamelCase , return_logits=__lowerCamelCase )
        _snake_case = model(__lowerCamelCase )[0]  # logits
    else:  # no classification heads to worry about
        _snake_case = bart.model.state_dict()
        remove_ignore_keys_(__lowerCamelCase )
        _snake_case = state_dict['''decoder.embed_tokens.weight''']
        _snake_case = bart.extract_features(__lowerCamelCase )
        if hf_checkpoint_name == "facebook/bart-large":
            _snake_case = BartModel(__lowerCamelCase ).eval()
            model.load_state_dict(__lowerCamelCase )
            _snake_case = model(__lowerCamelCase ).model[0]
        else:
            _snake_case = BartForConditionalGeneration(__lowerCamelCase ).eval()  # an existing summarization ckpt
            model.model.load_state_dict(__lowerCamelCase )
            if hasattr(__lowerCamelCase , '''lm_head''' ):
                _snake_case = make_linear_from_emb(model.model.shared )
            _snake_case = model.model(__lowerCamelCase )[0]

    # Check results
    if fairseq_output.shape != new_model_outputs.shape:
        raise ValueError(
            f'''`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}''' )
    if (fairseq_output != new_model_outputs).any().item():
        raise ValueError('''Some values in `fairseq_output` are different from `new_model_outputs`''' )
    Path(__lowerCamelCase ).mkdir(exist_ok=__lowerCamelCase )
    model.save_pretrained(__lowerCamelCase )


if __name__ == "__main__":
    UpperCAmelCase__ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        'fairseq_path', type=str, help='bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'
    )
    parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument(
        '--hf_config', default=None, type=str, help='Which huggingface architecture to use: bart-large-xsum'
    )
    UpperCAmelCase__ = parser.parse_args()
    convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
288
1
"""simple docstring""" import argparse import json import os import numpy as np import PIL import requests import tensorflow.keras.applications.efficientnet as efficientnet import torch from huggingface_hub import hf_hub_download from PIL import Image from tensorflow.keras.preprocessing import image from transformers import ( EfficientNetConfig, EfficientNetForImageClassification, EfficientNetImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() UpperCAmelCase__ = logging.get_logger(__name__) UpperCAmelCase__ = { 'b0': efficientnet.EfficientNetBa, 'b1': efficientnet.EfficientNetBa, 'b2': efficientnet.EfficientNetBa, 'b3': efficientnet.EfficientNetBa, 'b4': efficientnet.EfficientNetBa, 'b5': efficientnet.EfficientNetBa, 'b6': efficientnet.EfficientNetBa, 'b7': efficientnet.EfficientNetBa, } UpperCAmelCase__ = { 'b0': { 'hidden_dim': 1280, 'width_coef': 1.0, 'depth_coef': 1.0, 'image_size': 224, 'dropout_rate': 0.2, 'dw_padding': [], }, 'b1': { 'hidden_dim': 1280, 'width_coef': 1.0, 'depth_coef': 1.1, 'image_size': 240, 'dropout_rate': 0.2, 'dw_padding': [16], }, 'b2': { 'hidden_dim': 1408, 'width_coef': 1.1, 'depth_coef': 1.2, 'image_size': 260, 'dropout_rate': 0.3, 'dw_padding': [5, 8, 16], }, 'b3': { 'hidden_dim': 1536, 'width_coef': 1.2, 'depth_coef': 1.4, 'image_size': 300, 'dropout_rate': 0.3, 'dw_padding': [5, 18], }, 'b4': { 'hidden_dim': 1792, 'width_coef': 1.4, 'depth_coef': 1.8, 'image_size': 380, 'dropout_rate': 0.4, 'dw_padding': [6], }, 'b5': { 'hidden_dim': 2048, 'width_coef': 1.6, 'depth_coef': 2.2, 'image_size': 456, 'dropout_rate': 0.4, 'dw_padding': [13, 27], }, 'b6': { 'hidden_dim': 2304, 'width_coef': 1.8, 'depth_coef': 2.6, 'image_size': 528, 'dropout_rate': 0.5, 'dw_padding': [31], }, 'b7': { 'hidden_dim': 2560, 'width_coef': 2.0, 'depth_coef': 3.1, 'image_size': 600, 'dropout_rate': 0.5, 'dw_padding': [18], }, } def _UpperCAmelCase ( __lowerCamelCase : Union[str, Any] ) -> List[Any]: _snake_case = 
EfficientNetConfig() _snake_case = CONFIG_MAP[model_name]['''hidden_dim'''] _snake_case = CONFIG_MAP[model_name]['''width_coef'''] _snake_case = CONFIG_MAP[model_name]['''depth_coef'''] _snake_case = CONFIG_MAP[model_name]['''image_size'''] _snake_case = CONFIG_MAP[model_name]['''dropout_rate'''] _snake_case = CONFIG_MAP[model_name]['''dw_padding'''] _snake_case = '''huggingface/label-files''' _snake_case = '''imagenet-1k-id2label.json''' _snake_case = 10_00 _snake_case = json.load(open(hf_hub_download(__lowerCamelCase , __lowerCamelCase , repo_type='''dataset''' ) , '''r''' ) ) _snake_case = {int(__lowerCamelCase ): v for k, v in idalabel.items()} _snake_case = idalabel _snake_case = {v: k for k, v in idalabel.items()} return config def _UpperCAmelCase ( ) -> List[Any]: _snake_case = '''http://images.cocodataset.org/val2017/000000039769.jpg''' _snake_case = Image.open(requests.get(__lowerCamelCase , stream=__lowerCamelCase ).raw ) return im def _UpperCAmelCase ( __lowerCamelCase : List[str] ) -> Any: _snake_case = CONFIG_MAP[model_name]['''image_size'''] _snake_case = EfficientNetImageProcessor( size={'''height''': size, '''width''': size} , image_mean=[0.485, 0.456, 0.406] , image_std=[0.47_853_944, 0.4_732_864, 0.47_434_163] , do_center_crop=__lowerCamelCase , ) return preprocessor def _UpperCAmelCase ( __lowerCamelCase : Any ) -> List[str]: _snake_case = [v.split('''_''' )[0].split('''block''' )[1] for v in original_param_names if v.startswith('''block''' )] _snake_case = sorted(set(__lowerCamelCase ) ) _snake_case = len(__lowerCamelCase ) _snake_case = {b: str(__lowerCamelCase ) for b, i in zip(__lowerCamelCase , range(__lowerCamelCase ) )} _snake_case = [] rename_keys.append(('''stem_conv/kernel:0''', '''embeddings.convolution.weight''') ) rename_keys.append(('''stem_bn/gamma:0''', '''embeddings.batchnorm.weight''') ) rename_keys.append(('''stem_bn/beta:0''', '''embeddings.batchnorm.bias''') ) rename_keys.append(('''stem_bn/moving_mean:0''', 
'''embeddings.batchnorm.running_mean''') ) rename_keys.append(('''stem_bn/moving_variance:0''', '''embeddings.batchnorm.running_var''') ) for b in block_names: _snake_case = block_name_mapping[b] rename_keys.append((f'''block{b}_expand_conv/kernel:0''', f'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') ) rename_keys.append((f'''block{b}_expand_bn/gamma:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') ) rename_keys.append((f'''block{b}_expand_bn/beta:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') ) rename_keys.append( (f'''block{b}_expand_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') ) rename_keys.append( (f'''block{b}_expand_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') ) rename_keys.append( (f'''block{b}_dwconv/depthwise_kernel:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') ) rename_keys.append((f'''block{b}_bn/gamma:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') ) rename_keys.append((f'''block{b}_bn/beta:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') ) rename_keys.append( (f'''block{b}_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') ) rename_keys.append( (f'''block{b}_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') ) rename_keys.append((f'''block{b}_se_reduce/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') ) rename_keys.append((f'''block{b}_se_reduce/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') ) rename_keys.append((f'''block{b}_se_expand/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') ) rename_keys.append((f'''block{b}_se_expand/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') ) rename_keys.append( (f'''block{b}_project_conv/kernel:0''', 
f'''encoder.blocks.{hf_b}.projection.project_conv.weight''') ) rename_keys.append((f'''block{b}_project_bn/gamma:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.weight''') ) rename_keys.append((f'''block{b}_project_bn/beta:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.bias''') ) rename_keys.append( (f'''block{b}_project_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') ) rename_keys.append( (f'''block{b}_project_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') ) rename_keys.append(('''top_conv/kernel:0''', '''encoder.top_conv.weight''') ) rename_keys.append(('''top_bn/gamma:0''', '''encoder.top_bn.weight''') ) rename_keys.append(('''top_bn/beta:0''', '''encoder.top_bn.bias''') ) rename_keys.append(('''top_bn/moving_mean:0''', '''encoder.top_bn.running_mean''') ) rename_keys.append(('''top_bn/moving_variance:0''', '''encoder.top_bn.running_var''') ) _snake_case = {} for item in rename_keys: if item[0] in original_param_names: _snake_case = '''efficientnet.''' + item[1] _snake_case = '''classifier.weight''' _snake_case = '''classifier.bias''' return key_mapping def _UpperCAmelCase ( __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[int] ) -> Optional[int]: for key, value in tf_params.items(): if "normalization" in key: continue _snake_case = key_mapping[key] if "_conv" in key and "kernel" in key: _snake_case = torch.from_numpy(__lowerCamelCase ).permute(3 , 2 , 0 , 1 ) elif "depthwise_kernel" in key: _snake_case = torch.from_numpy(__lowerCamelCase ).permute(2 , 3 , 0 , 1 ) elif "kernel" in key: _snake_case = torch.from_numpy(np.transpose(__lowerCamelCase ) ) else: _snake_case = torch.from_numpy(__lowerCamelCase ) # Replace HF parameters with original TF model parameters assert hf_params[hf_key].shape == new_hf_value.shape hf_params[hf_key].copy_(__lowerCamelCase ) @torch.no_grad() def _UpperCAmelCase ( __lowerCamelCase : 
Optional[Any] , __lowerCamelCase : str , __lowerCamelCase : List[str] , __lowerCamelCase : Union[str, Any] ) -> List[Any]: _snake_case = model_classes[model_name]( include_top=__lowerCamelCase , weights='''imagenet''' , input_tensor=__lowerCamelCase , input_shape=__lowerCamelCase , pooling=__lowerCamelCase , classes=10_00 , classifier_activation='''softmax''' , ) _snake_case = original_model.trainable_variables _snake_case = original_model.non_trainable_variables _snake_case = {param.name: param.numpy() for param in tf_params} for param in tf_non_train_params: _snake_case = param.numpy() _snake_case = list(tf_params.keys() ) # Load HuggingFace model _snake_case = get_efficientnet_config(__lowerCamelCase ) _snake_case = EfficientNetForImageClassification(__lowerCamelCase ).eval() _snake_case = hf_model.state_dict() # Create src-to-dst parameter name mapping dictionary print('''Converting parameters...''' ) _snake_case = rename_keys(__lowerCamelCase ) replace_params(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) # Initialize preprocessor and preprocess input image _snake_case = convert_image_processor(__lowerCamelCase ) _snake_case = preprocessor(images=prepare_img() , return_tensors='''pt''' ) # HF model inference hf_model.eval() with torch.no_grad(): _snake_case = hf_model(**__lowerCamelCase ) _snake_case = outputs.logits.detach().numpy() # Original model inference _snake_case = False _snake_case = CONFIG_MAP[model_name]['''image_size'''] _snake_case = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST ) _snake_case = image.img_to_array(__lowerCamelCase ) _snake_case = np.expand_dims(__lowerCamelCase , axis=0 ) _snake_case = original_model.predict(__lowerCamelCase ) # Check whether original and HF model outputs match -> np.allclose assert np.allclose(__lowerCamelCase , __lowerCamelCase , atol=1E-3 ), "The predicted logits are not the same." 
print('''Model outputs match!''' ) if save_model: # Create folder to save model if not os.path.isdir(__lowerCamelCase ): os.mkdir(__lowerCamelCase ) # Save converted model and image processor hf_model.save_pretrained(__lowerCamelCase ) preprocessor.save_pretrained(__lowerCamelCase ) if push_to_hub: # Push model and image processor to hub print(f'''Pushing converted {model_name} to the hub...''' ) _snake_case = f'''efficientnet-{model_name}''' preprocessor.push_to_hub(__lowerCamelCase ) hf_model.push_to_hub(__lowerCamelCase ) if __name__ == "__main__": UpperCAmelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='b0', type=str, help='Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].', ) parser.add_argument( '--pytorch_dump_folder_path', default='hf_model', type=str, help='Path to the output PyTorch model directory.', ) parser.add_argument('--save_model', action='store_true', help='Save model to local') parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub') UpperCAmelCase__ = parser.parse_args() convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
288
"""simple docstring""" def _UpperCAmelCase ( __lowerCamelCase : Tuple ) -> Any: stooge(__lowerCamelCase , 0 , len(__lowerCamelCase ) - 1 ) return arr def _UpperCAmelCase ( __lowerCamelCase : str , __lowerCamelCase : List[str] , __lowerCamelCase : str ) -> int: if i >= h: return # If first element is smaller than the last then swap them if arr[i] > arr[h]: _snake_case , _snake_case = arr[h], arr[i] # If there are more than 2 elements in the array if h - i + 1 > 2: _snake_case = (int)((h - i + 1) / 3 ) # Recursively sort first 2/3 elements stooge(__lowerCamelCase , __lowerCamelCase , (h - t) ) # Recursively sort last 2/3 elements stooge(__lowerCamelCase , i + t , (__lowerCamelCase) ) # Recursively sort first 2/3 elements stooge(__lowerCamelCase , __lowerCamelCase , (h - t) ) if __name__ == "__main__": UpperCAmelCase__ = input('Enter numbers separated by a comma:\n').strip() UpperCAmelCase__ = [int(item) for item in user_input.split(',')] print(stooge_sort(unsorted))
288
1
"""simple docstring""" import logging from pathlib import Path import numpy as np import pytorch_lightning as pl import torch from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint from pytorch_lightning.utilities import rank_zero_only from utils_rag import save_json def _UpperCAmelCase ( __lowerCamelCase : Any ) -> Optional[Any]: _snake_case = filter(lambda __lowerCamelCase : p.requires_grad , model.parameters() ) _snake_case = sum([np.prod(p.size() ) for p in model_parameters] ) return params UpperCAmelCase__ = logging.getLogger(__name__) def _UpperCAmelCase ( __lowerCamelCase : int , __lowerCamelCase : Dict ) -> str: if metric == "rouge2": _snake_case = '''{val_avg_rouge2:.4f}-{step_count}''' elif metric == "bleu": _snake_case = '''{val_avg_bleu:.4f}-{step_count}''' elif metric == "em": _snake_case = '''{val_avg_em:.4f}-{step_count}''' else: raise NotImplementedError( f'''seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this''' ''' function.''' ) _snake_case = ModelCheckpoint( dirpath=__lowerCamelCase , filename=__lowerCamelCase , monitor=f'''val_{metric}''' , mode='''max''' , save_top_k=3 , every_n_epochs=1 , ) return checkpoint_callback def _UpperCAmelCase ( __lowerCamelCase : str , __lowerCamelCase : List[str] ) -> List[str]: return EarlyStopping( monitor=f'''val_{metric}''' , mode='''min''' if '''loss''' in metric else '''max''' , patience=__lowerCamelCase , verbose=__lowerCamelCase , ) class lowerCAmelCase__ ( pl.Callback ): def lowercase ( self : Any , _lowerCamelCase : Optional[Any] , _lowerCamelCase : str ): _snake_case = {f'''lr_group_{i}''': param['''lr'''] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )} pl_module.logger.log_metrics(_lowerCamelCase ) @rank_zero_only def lowercase ( self : Tuple , _lowerCamelCase : pl.Trainer , _lowerCamelCase : pl.LightningModule , _lowerCamelCase : str , _lowerCamelCase : Tuple=True ): logger.info(f'''***** {type_path} results at 
step {trainer.global_step:05d} *****''' ) _snake_case = trainer.callback_metrics trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['''log''', '''progress_bar''', '''preds''']} ) # Log results _snake_case = Path(pl_module.hparams.output_dir ) if type_path == "test": _snake_case = od / '''test_results.txt''' _snake_case = od / '''test_generations.txt''' else: # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json # If people want this it will be easy enough to add back. _snake_case = od / f'''{type_path}_results/{trainer.global_step:05d}.txt''' _snake_case = od / f'''{type_path}_generations/{trainer.global_step:05d}.txt''' results_file.parent.mkdir(exist_ok=_lowerCamelCase ) generations_file.parent.mkdir(exist_ok=_lowerCamelCase ) with open(_lowerCamelCase , '''a+''' ) as writer: for key in sorted(_lowerCamelCase ): if key in ["log", "progress_bar", "preds"]: continue _snake_case = metrics[key] if isinstance(_lowerCamelCase , torch.Tensor ): _snake_case = val.item() _snake_case = f'''{key}: {val:.6f}\n''' writer.write(_lowerCamelCase ) if not save_generations: return if "preds" in metrics: _snake_case = '''\n'''.join(metrics['''preds'''] ) generations_file.open('''w+''' ).write(_lowerCamelCase ) @rank_zero_only def lowercase ( self : Tuple , _lowerCamelCase : str , _lowerCamelCase : List[str] ): try: _snake_case = pl_module.model.model.num_parameters() except AttributeError: _snake_case = pl_module.model.num_parameters() _snake_case = count_trainable_parameters(_lowerCamelCase ) # mp stands for million parameters trainer.logger.log_metrics({'''n_params''': npars, '''mp''': npars / 1e6, '''grad_mp''': n_trainable_pars / 1e6} ) @rank_zero_only def lowercase ( self : List[str] , _lowerCamelCase : pl.Trainer , _lowerCamelCase : pl.LightningModule ): save_json(pl_module.metrics , pl_module.metrics_save_path ) return self._write_logs(_lowerCamelCase , _lowerCamelCase , '''test''' ) @rank_zero_only def 
lowercase ( self : Any , _lowerCamelCase : pl.Trainer , _lowerCamelCase : Any ): save_json(pl_module.metrics , pl_module.metrics_save_path ) # Uncommenting this will save val generations # return self._write_logs(trainer, pl_module, "valid")
288
"""simple docstring""" # Logistic Regression from scratch # In[62]: # In[63]: # importing all the required libraries import numpy as np from matplotlib import pyplot as plt from sklearn import datasets def _UpperCAmelCase ( __lowerCamelCase : str ) -> List[Any]: return 1 / (1 + np.exp(-z )) def _UpperCAmelCase ( __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[str] ) -> Optional[Any]: return (-y * np.log(__lowerCamelCase ) - (1 - y) * np.log(1 - h )).mean() def _UpperCAmelCase ( __lowerCamelCase : Dict , __lowerCamelCase : Dict , __lowerCamelCase : Dict ) -> List[str]: _snake_case = np.dot(__lowerCamelCase , __lowerCamelCase ) return np.sum(y * scores - np.log(1 + np.exp(__lowerCamelCase ) ) ) def _UpperCAmelCase ( __lowerCamelCase : List[str] , __lowerCamelCase : List[str] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : str=7_00_00 ) -> Optional[Any]: _snake_case = np.zeros(x.shape[1] ) for iterations in range(__lowerCamelCase ): _snake_case = np.dot(__lowerCamelCase , __lowerCamelCase ) _snake_case = sigmoid_function(__lowerCamelCase ) _snake_case = np.dot(x.T , h - y ) / y.size _snake_case = theta - alpha * gradient # updating the weights _snake_case = np.dot(__lowerCamelCase , __lowerCamelCase ) _snake_case = sigmoid_function(__lowerCamelCase ) _snake_case = cost_function(__lowerCamelCase , __lowerCamelCase ) if iterations % 1_00 == 0: print(f'''loss: {j} \t''' ) # printing the loss after every 100 iterations return theta # In[68]: if __name__ == "__main__": UpperCAmelCase__ = datasets.load_iris() UpperCAmelCase__ = iris.data[:, :2] UpperCAmelCase__ = (iris.target != 0) * 1 UpperCAmelCase__ = 0.1 UpperCAmelCase__ = logistic_reg(alpha, x, y, max_iterations=70000) print('theta: ', theta) # printing the theta i.e our weights vector def _UpperCAmelCase ( __lowerCamelCase : Tuple ) -> Union[str, Any]: return sigmoid_function( np.dot(__lowerCamelCase , __lowerCamelCase ) ) # predicting the value of probability from the logistic regression 
algorithm plt.figure(figsize=(10, 6)) plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color='b', label='0') plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color='r', label='1') ((UpperCAmelCase__) , (UpperCAmelCase__)) = (x[:, 0].min(), x[:, 0].max()) ((UpperCAmelCase__) , (UpperCAmelCase__)) = (x[:, 1].min(), x[:, 1].max()) ((UpperCAmelCase__) , (UpperCAmelCase__)) = np.meshgrid(np.linspace(xa_min, xa_max), np.linspace(xa_min, xa_max)) UpperCAmelCase__ = np.c_[xxa.ravel(), xxa.ravel()] UpperCAmelCase__ = predict_prob(grid).reshape(xxa.shape) plt.contour(xxa, xxa, probs, [0.5], linewidths=1, colors='black') plt.legend() plt.show()
288
1
"""simple docstring""" from ..utils import DummyObject, requires_backends class lowerCAmelCase__ ( metaclass=A_ ): __a = ["""onnx"""] def __init__( self : Optional[Any] , *_lowerCamelCase : List[Any] , **_lowerCamelCase : Tuple ): requires_backends(self , ['''onnx'''] ) @classmethod def lowercase ( cls : Optional[Any] , *_lowerCamelCase : str , **_lowerCamelCase : Dict ): requires_backends(cls , ['''onnx'''] ) @classmethod def lowercase ( cls : Optional[int] , *_lowerCamelCase : Tuple , **_lowerCamelCase : Tuple ): requires_backends(cls , ['''onnx'''] )
288
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging UpperCAmelCase__ = logging.get_logger(__name__) UpperCAmelCase__ = {'vocab_file': 'sentencepiece.model'} UpperCAmelCase__ = { 'vocab_file': { 'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/sentencepiece.model', }, } UpperCAmelCase__ = { 'google/rembert': 256, } class lowerCAmelCase__ ( A_ ): __a = VOCAB_FILES_NAMES __a = PRETRAINED_VOCAB_FILES_MAP __a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self : Union[str, Any] , _lowerCamelCase : Any , _lowerCamelCase : Union[str, Any]=False , _lowerCamelCase : Any=True , _lowerCamelCase : Optional[Any]=True , _lowerCamelCase : int="[CLS]" , _lowerCamelCase : Optional[int]="[SEP]" , _lowerCamelCase : Optional[int]="[UNK]" , _lowerCamelCase : Optional[Any]="[SEP]" , _lowerCamelCase : str="[PAD]" , _lowerCamelCase : List[Any]="[CLS]" , _lowerCamelCase : Any="[MASK]" , **_lowerCamelCase : Optional[int] , ): super().__init__( do_lower_case=_lowerCamelCase , remove_space=_lowerCamelCase , keep_accents=_lowerCamelCase , bos_token=_lowerCamelCase , eos_token=_lowerCamelCase , unk_token=_lowerCamelCase , sep_token=_lowerCamelCase , pad_token=_lowerCamelCase , cls_token=_lowerCamelCase , mask_token=_lowerCamelCase , **_lowerCamelCase , ) _snake_case = do_lower_case _snake_case = remove_space _snake_case = keep_accents _snake_case = vocab_file _snake_case = spm.SentencePieceProcessor() self.sp_model.Load(_lowerCamelCase ) @property def lowercase ( self : int ): return len(self.sp_model ) def lowercase ( self : Any ): _snake_case = {self.convert_ids_to_tokens(_lowerCamelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : List[str] ): _snake_case = self.__dict__.copy() _snake_case = None return state def __setstate__( self : 
List[str] , _lowerCamelCase : Tuple ): _snake_case = d _snake_case = spm.SentencePieceProcessor() self.sp_model.Load(self.vocab_file ) def lowercase ( self : str , _lowerCamelCase : List[str] , _lowerCamelCase : Tuple=False ): _snake_case = self.sp_model.EncodeAsPieces(_lowerCamelCase ) return pieces def lowercase ( self : str , _lowerCamelCase : str ): return self.sp_model.PieceToId(_lowerCamelCase ) def lowercase ( self : List[str] , _lowerCamelCase : int ): return self.sp_model.IdToPiece(_lowerCamelCase ) def lowercase ( self : Union[str, Any] , _lowerCamelCase : Any ): _snake_case = self.sp_model.decode_pieces(_lowerCamelCase ) return out_string def lowercase ( self : Optional[Any] , _lowerCamelCase : List[int] , _lowerCamelCase : Optional[List[int]] = None ): _snake_case = [self.sep_token_id] _snake_case = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def lowercase ( self : Tuple , _lowerCamelCase : List[int] , _lowerCamelCase : Optional[List[int]] = None , _lowerCamelCase : bool = False ): if already_has_special_tokens: if token_ids_a is not None: raise ValueError( '''You should not supply a second sequence if the provided sequence of ''' '''ids is already formatted with special tokens for the model.''' ) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is not None: return [1] + ([0] * len(_lowerCamelCase )) + [1] + ([0] * len(_lowerCamelCase )) + [1] return [1] + ([0] * len(_lowerCamelCase )) + [1] def lowercase ( self : Optional[int] , _lowerCamelCase : List[int] , _lowerCamelCase : Optional[List[int]] = None ): _snake_case = [self.sep_token_id] _snake_case = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def lowercase ( self : List[str] , _lowerCamelCase : str , _lowerCamelCase : Optional[str] = None ): if not 
os.path.isdir(_lowerCamelCase ): logger.error('''Vocabulary path ({}) should be a directory'''.format(_lowerCamelCase ) ) return _snake_case = os.path.join( _lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCamelCase ): copyfile(self.vocab_file , _lowerCamelCase ) return (out_vocab_file,)
288
1
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_pytesseract, require_torch from transformers.utils import is_pytesseract_available, is_torch_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_pytesseract_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class lowerCAmelCase__ ( unittest.TestCase ): def __init__( self : str , _lowerCamelCase : Optional[int] , _lowerCamelCase : List[str]=7 , _lowerCamelCase : str=3 , _lowerCamelCase : Any=18 , _lowerCamelCase : Any=30 , _lowerCamelCase : Dict=400 , _lowerCamelCase : List[Any]=True , _lowerCamelCase : str=None , _lowerCamelCase : Union[str, Any]=True , ): _snake_case = size if size is not None else {'''height''': 18, '''width''': 18} _snake_case = parent _snake_case = batch_size _snake_case = num_channels _snake_case = image_size _snake_case = min_resolution _snake_case = max_resolution _snake_case = do_resize _snake_case = size _snake_case = apply_ocr def lowercase ( self : Optional[Any] ): return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr} @require_torch @require_pytesseract class lowerCAmelCase__ ( A_ , unittest.TestCase ): __a = LayoutLMvaImageProcessor if is_pytesseract_available() else None def lowercase ( self : int ): _snake_case = LayoutLMvaImageProcessingTester(self ) @property def lowercase ( self : Optional[Any] ): return self.image_processor_tester.prepare_image_processor_dict() def lowercase ( self : Optional[Any] ): _snake_case = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_lowerCamelCase , '''do_resize''' ) ) self.assertTrue(hasattr(_lowerCamelCase , '''size''' ) ) self.assertTrue(hasattr(_lowerCamelCase , '''apply_ocr''' ) ) def lowercase ( self : Union[str, Any] ): _snake_case = self.image_processing_class.from_dict(self.image_processor_dict 
) self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 18} ) _snake_case = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} ) def lowercase ( self : Any ): pass def lowercase ( self : Optional[Any] ): # Initialize image_processing _snake_case = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase ) for image in image_inputs: self.assertIsInstance(_lowerCamelCase , Image.Image ) # Test not batched input _snake_case = image_processing(image_inputs[0] , return_tensors='''pt''' ) self.assertEqual( encoding.pixel_values.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) self.assertIsInstance(encoding.words , _lowerCamelCase ) self.assertIsInstance(encoding.boxes , _lowerCamelCase ) # Test batched _snake_case = image_processing(_lowerCamelCase , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) def lowercase ( self : str ): # Initialize image_processing _snake_case = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , numpify=_lowerCamelCase ) for image in image_inputs: self.assertIsInstance(_lowerCamelCase , np.ndarray ) # Test not batched input _snake_case = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], 
self.image_processor_tester.size['''width'''], ) , ) # Test batched _snake_case = image_processing(_lowerCamelCase , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) def lowercase ( self : Any ): # Initialize image_processing _snake_case = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , torchify=_lowerCamelCase ) for image in image_inputs: self.assertIsInstance(_lowerCamelCase , torch.Tensor ) # Test not batched input _snake_case = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) # Test batched _snake_case = image_processing(_lowerCamelCase , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) def lowercase ( self : List[Any] ): # with apply_OCR = True _snake_case = LayoutLMvaImageProcessor() from datasets import load_dataset _snake_case = load_dataset('''hf-internal-testing/fixtures_docvqa''' , split='''test''' ) _snake_case = Image.open(ds[0]['''file'''] ).convert('''RGB''' ) _snake_case = image_processing(_lowerCamelCase , return_tensors='''pt''' ) self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) ) self.assertEqual(len(encoding.words ) , len(encoding.boxes ) ) # fmt: off # the words and boxes were obtained with Tesseract 4.1.1 _snake_case = [['''11:14''', '''to''', '''11:39''', 
'''a.m''', '''11:39''', '''to''', '''11:44''', '''a.m.''', '''11:44''', '''a.m.''', '''to''', '''12:25''', '''p.m.''', '''12:25''', '''to''', '''12:58''', '''p.m.''', '''12:58''', '''to''', '''4:00''', '''p.m.''', '''2:00''', '''to''', '''5:00''', '''p.m.''', '''Coffee''', '''Break''', '''Coffee''', '''will''', '''be''', '''served''', '''for''', '''men''', '''and''', '''women''', '''in''', '''the''', '''lobby''', '''adjacent''', '''to''', '''exhibit''', '''area.''', '''Please''', '''move''', '''into''', '''exhibit''', '''area.''', '''(Exhibits''', '''Open)''', '''TRRF''', '''GENERAL''', '''SESSION''', '''(PART''', '''|)''', '''Presiding:''', '''Lee''', '''A.''', '''Waller''', '''TRRF''', '''Vice''', '''President''', '''“Introductory''', '''Remarks”''', '''Lee''', '''A.''', '''Waller,''', '''TRRF''', '''Vice''', '''Presi-''', '''dent''', '''Individual''', '''Interviews''', '''with''', '''TRRF''', '''Public''', '''Board''', '''Members''', '''and''', '''Sci-''', '''entific''', '''Advisory''', '''Council''', '''Mem-''', '''bers''', '''Conducted''', '''by''', '''TRRF''', '''Treasurer''', '''Philip''', '''G.''', '''Kuehn''', '''to''', '''get''', '''answers''', '''which''', '''the''', '''public''', '''refrigerated''', '''warehousing''', '''industry''', '''is''', '''looking''', '''for.''', '''Plus''', '''questions''', '''from''', '''the''', '''floor.''', '''Dr.''', '''Emil''', '''M.''', '''Mrak,''', '''University''', '''of''', '''Cal-''', '''ifornia,''', '''Chairman,''', '''TRRF''', '''Board;''', '''Sam''', '''R.''', '''Cecil,''', '''University''', '''of''', '''Georgia''', '''College''', '''of''', '''Agriculture;''', '''Dr.''', '''Stanley''', '''Charm,''', '''Tufts''', '''University''', '''School''', '''of''', '''Medicine;''', '''Dr.''', '''Robert''', '''H.''', '''Cotton,''', '''ITT''', '''Continental''', '''Baking''', '''Company;''', '''Dr.''', '''Owen''', '''Fennema,''', '''University''', '''of''', '''Wis-''', '''consin;''', '''Dr.''', '''Robert''', '''E.''', 
'''Hardenburg,''', '''USDA.''', '''Questions''', '''and''', '''Answers''', '''Exhibits''', '''Open''', '''Capt.''', '''Jack''', '''Stoney''', '''Room''', '''TRRF''', '''Scientific''', '''Advisory''', '''Council''', '''Meeting''', '''Ballroom''', '''Foyer''']] # noqa: E231 _snake_case = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], 
[334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 
860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231 # fmt: on self.assertListEqual(encoding.words , _lowerCamelCase ) self.assertListEqual(encoding.boxes , _lowerCamelCase ) # with apply_OCR = False _snake_case = LayoutLMvaImageProcessor(apply_ocr=_lowerCamelCase ) _snake_case = image_processing(_lowerCamelCase , return_tensors='''pt''' ) self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
288
"""simple docstring""" from math import pow def _UpperCAmelCase ( __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : int , ) -> tuple[int, int]: if current_sum == needed_sum: # If the sum of the powers is equal to needed_sum, then we have a solution. solutions_count += 1 return current_sum, solutions_count _snake_case = int(pow(__lowerCamelCase , __lowerCamelCase ) ) if current_sum + i_to_n <= needed_sum: # If the sum of the powers is less than needed_sum, then continue adding powers. current_sum += i_to_n _snake_case , _snake_case = backtrack( __lowerCamelCase , __lowerCamelCase , current_number + 1 , __lowerCamelCase , __lowerCamelCase ) current_sum -= i_to_n if i_to_n < needed_sum: # If the power of i is less than needed_sum, then try with the next power. _snake_case , _snake_case = backtrack( __lowerCamelCase , __lowerCamelCase , current_number + 1 , __lowerCamelCase , __lowerCamelCase ) return current_sum, solutions_count def _UpperCAmelCase ( __lowerCamelCase : int , __lowerCamelCase : int ) -> int: if not (1 <= needed_sum <= 10_00 and 2 <= power <= 10): raise ValueError( '''Invalid input\n''' '''needed_sum must be between 1 and 1000, power between 2 and 10.''' ) return backtrack(__lowerCamelCase , __lowerCamelCase , 1 , 0 , 0 )[1] # Return the solutions_count if __name__ == "__main__": import doctest doctest.testmod()
288
1
"""simple docstring""" import argparse import csv import logging import os import random import numpy as np import torch from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset from tqdm import tqdm, trange from transformers import ( CONFIG_NAME, WEIGHTS_NAME, AdamW, OpenAIGPTDoubleHeadsModel, OpenAIGPTTokenizer, get_linear_schedule_with_warmup, ) logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO ) UpperCAmelCase__ = logging.getLogger(__name__) def _UpperCAmelCase ( __lowerCamelCase : Tuple , __lowerCamelCase : List[Any] ) -> Optional[int]: _snake_case = np.argmax(__lowerCamelCase , axis=1 ) return np.sum(outputs == labels ) def _UpperCAmelCase ( __lowerCamelCase : Optional[Any] ) -> int: with open(__lowerCamelCase , encoding='''utf_8''' ) as f: _snake_case = csv.reader(__lowerCamelCase ) _snake_case = [] next(__lowerCamelCase ) # skip the first line for line in tqdm(__lowerCamelCase ): output.append((''' '''.join(line[1:5] ), line[5], line[6], int(line[-1] ) - 1) ) return output def _UpperCAmelCase ( __lowerCamelCase : Any , __lowerCamelCase : Dict , __lowerCamelCase : Dict , __lowerCamelCase : List[str] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[int] ) -> Any: _snake_case = [] for dataset in encoded_datasets: _snake_case = len(__lowerCamelCase ) _snake_case = np.zeros((n_batch, 2, input_len) , dtype=np.intaa ) _snake_case = np.zeros((n_batch, 2) , dtype=np.intaa ) _snake_case = np.full((n_batch, 2, input_len) , fill_value=-1_00 , dtype=np.intaa ) _snake_case = np.zeros((n_batch,) , dtype=np.intaa ) for ( i, (story, conta, conta, mc_label), ) in enumerate(__lowerCamelCase ): _snake_case = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token] _snake_case = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token] _snake_case = with_conta _snake_case = with_conta 
_snake_case = len(__lowerCamelCase ) - 1 _snake_case = len(__lowerCamelCase ) - 1 _snake_case = with_conta _snake_case = with_conta _snake_case = mc_label _snake_case = (input_ids, mc_token_ids, lm_labels, mc_labels) tensor_datasets.append(tuple(torch.tensor(__lowerCamelCase ) for t in all_inputs ) ) return tensor_datasets def _UpperCAmelCase ( ) -> Optional[Any]: _snake_case = argparse.ArgumentParser() parser.add_argument('''--model_name''' , type=__lowerCamelCase , default='''openai-gpt''' , help='''pretrained model name''' ) parser.add_argument('''--do_train''' , action='''store_true''' , help='''Whether to run training.''' ) parser.add_argument('''--do_eval''' , action='''store_true''' , help='''Whether to run eval on the dev set.''' ) parser.add_argument( '''--output_dir''' , default=__lowerCamelCase , type=__lowerCamelCase , required=__lowerCamelCase , help='''The output directory where the model predictions and checkpoints will be written.''' , ) parser.add_argument('''--train_dataset''' , type=__lowerCamelCase , default='''''' ) parser.add_argument('''--eval_dataset''' , type=__lowerCamelCase , default='''''' ) parser.add_argument('''--seed''' , type=__lowerCamelCase , default=42 ) parser.add_argument('''--num_train_epochs''' , type=__lowerCamelCase , default=3 ) parser.add_argument('''--train_batch_size''' , type=__lowerCamelCase , default=8 ) parser.add_argument('''--eval_batch_size''' , type=__lowerCamelCase , default=16 ) parser.add_argument('''--adam_epsilon''' , default=1E-8 , type=__lowerCamelCase , help='''Epsilon for Adam optimizer.''' ) parser.add_argument('''--max_grad_norm''' , type=__lowerCamelCase , default=1 ) parser.add_argument( '''--max_steps''' , default=-1 , type=__lowerCamelCase , help=( '''If > 0: set total number of training steps to perform. 
Override num_train_epochs.''' ) , ) parser.add_argument( '''--gradient_accumulation_steps''' , type=__lowerCamelCase , default=1 , help='''Number of updates steps to accumulate before performing a backward/update pass.''' , ) parser.add_argument('''--learning_rate''' , type=__lowerCamelCase , default=6.2_5E-5 ) parser.add_argument('''--warmup_steps''' , default=0 , type=__lowerCamelCase , help='''Linear warmup over warmup_steps.''' ) parser.add_argument('''--lr_schedule''' , type=__lowerCamelCase , default='''warmup_linear''' ) parser.add_argument('''--weight_decay''' , type=__lowerCamelCase , default=0.01 ) parser.add_argument('''--lm_coef''' , type=__lowerCamelCase , default=0.9 ) parser.add_argument('''--n_valid''' , type=__lowerCamelCase , default=3_74 ) parser.add_argument('''--server_ip''' , type=__lowerCamelCase , default='''''' , help='''Can be used for distant debugging.''' ) parser.add_argument('''--server_port''' , type=__lowerCamelCase , default='''''' , help='''Can be used for distant debugging.''' ) _snake_case = parser.parse_args() print(__lowerCamelCase ) if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print('''Waiting for debugger attach''' ) ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=__lowerCamelCase ) ptvsd.wait_for_attach() random.seed(args.seed ) np.random.seed(args.seed ) torch.manual_seed(args.seed ) torch.cuda.manual_seed_all(args.seed ) _snake_case = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' ) _snake_case = torch.cuda.device_count() logger.info('''device: {}, n_gpu {}'''.format(__lowerCamelCase , __lowerCamelCase ) ) if not args.do_train and not args.do_eval: raise ValueError('''At least one of `do_train` or `do_eval` must be True.''' ) if not os.path.exists(args.output_dir ): os.makedirs(args.output_dir ) # Load tokenizer and model # This loading functions also add new 
tokens and embeddings called `special tokens` # These new embeddings will be fine-tuned on the RocStories dataset _snake_case = ['''_start_''', '''_delimiter_''', '''_classify_'''] _snake_case = OpenAIGPTTokenizer.from_pretrained(args.model_name ) tokenizer.add_tokens(__lowerCamelCase ) _snake_case = tokenizer.convert_tokens_to_ids(__lowerCamelCase ) _snake_case = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name ) model.resize_token_embeddings(len(__lowerCamelCase ) ) model.to(__lowerCamelCase ) # Load and encode the datasets def tokenize_and_encode(__lowerCamelCase : Union[str, Any] ): if isinstance(__lowerCamelCase , __lowerCamelCase ): return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(__lowerCamelCase ) ) elif isinstance(__lowerCamelCase , __lowerCamelCase ): return obj return [tokenize_and_encode(__lowerCamelCase ) for o in obj] logger.info('''Encoding dataset...''' ) _snake_case = load_rocstories_dataset(args.train_dataset ) _snake_case = load_rocstories_dataset(args.eval_dataset ) _snake_case = (train_dataset, eval_dataset) _snake_case = tokenize_and_encode(__lowerCamelCase ) # Compute the max input length for the Transformer _snake_case = model.config.n_positions // 2 - 2 _snake_case = max( len(story[:max_length] ) + max(len(conta[:max_length] ) , len(conta[:max_length] ) ) + 3 for dataset in encoded_datasets for story, conta, conta, _ in dataset ) _snake_case = min(__lowerCamelCase , model.config.n_positions ) # Max size of input for the pre-trained model # Prepare inputs tensors and dataloaders _snake_case = pre_process_datasets(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , *__lowerCamelCase ) _snake_case , _snake_case = tensor_datasets[0], tensor_datasets[1] _snake_case = TensorDataset(*__lowerCamelCase ) _snake_case = RandomSampler(__lowerCamelCase ) _snake_case = DataLoader(__lowerCamelCase , sampler=__lowerCamelCase , batch_size=args.train_batch_size ) _snake_case = TensorDataset(*__lowerCamelCase ) _snake_case = 
SequentialSampler(__lowerCamelCase ) _snake_case = DataLoader(__lowerCamelCase , sampler=__lowerCamelCase , batch_size=args.eval_batch_size ) # Prepare optimizer if args.do_train: if args.max_steps > 0: _snake_case = args.max_steps _snake_case = args.max_steps // (len(__lowerCamelCase ) // args.gradient_accumulation_steps) + 1 else: _snake_case = len(__lowerCamelCase ) // args.gradient_accumulation_steps * args.num_train_epochs _snake_case = list(model.named_parameters() ) _snake_case = ['''bias''', '''LayerNorm.bias''', '''LayerNorm.weight'''] _snake_case = [ { '''params''': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )], '''weight_decay''': args.weight_decay, }, {'''params''': [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], '''weight_decay''': 0.0}, ] _snake_case = AdamW(__lowerCamelCase , lr=args.learning_rate , eps=args.adam_epsilon ) _snake_case = get_linear_schedule_with_warmup( __lowerCamelCase , num_warmup_steps=args.warmup_steps , num_training_steps=__lowerCamelCase ) if args.do_train: _snake_case , _snake_case , _snake_case = 0, 0, None model.train() for _ in trange(int(args.num_train_epochs ) , desc='''Epoch''' ): _snake_case = 0 _snake_case = 0 _snake_case = tqdm(__lowerCamelCase , desc='''Training''' ) for step, batch in enumerate(__lowerCamelCase ): _snake_case = tuple(t.to(__lowerCamelCase ) for t in batch ) _snake_case , _snake_case , _snake_case , _snake_case = batch _snake_case = model(__lowerCamelCase , mc_token_ids=__lowerCamelCase , lm_labels=__lowerCamelCase , mc_labels=__lowerCamelCase ) _snake_case = args.lm_coef * losses[0] + losses[1] loss.backward() optimizer.step() scheduler.step() optimizer.zero_grad() tr_loss += loss.item() _snake_case = ( loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item() ) nb_tr_steps += 1 _snake_case = '''Training loss: {:.2e} lr: {:.2e}'''.format(__lowerCamelCase , scheduler.get_lr()[0] ) # Save a trained model if args.do_train: # 
Save a trained model, configuration and tokenizer _snake_case = model.module if hasattr(__lowerCamelCase , '''module''' ) else model # Only save the model itself # If we save using the predefined names, we can load using `from_pretrained` _snake_case = os.path.join(args.output_dir , __lowerCamelCase ) _snake_case = os.path.join(args.output_dir , __lowerCamelCase ) torch.save(model_to_save.state_dict() , __lowerCamelCase ) model_to_save.config.to_json_file(__lowerCamelCase ) tokenizer.save_vocabulary(args.output_dir ) # Load a trained model and vocabulary that you have fine-tuned _snake_case = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir ) _snake_case = OpenAIGPTTokenizer.from_pretrained(args.output_dir ) model.to(__lowerCamelCase ) if args.do_eval: model.eval() _snake_case , _snake_case = 0, 0 _snake_case , _snake_case = 0, 0 for batch in tqdm(__lowerCamelCase , desc='''Evaluating''' ): _snake_case = tuple(t.to(__lowerCamelCase ) for t in batch ) _snake_case , _snake_case , _snake_case , _snake_case = batch with torch.no_grad(): _snake_case , _snake_case , _snake_case , _snake_case = model( __lowerCamelCase , mc_token_ids=__lowerCamelCase , lm_labels=__lowerCamelCase , mc_labels=__lowerCamelCase ) _snake_case = mc_logits.detach().cpu().numpy() _snake_case = mc_labels.to('''cpu''' ).numpy() _snake_case = accuracy(__lowerCamelCase , __lowerCamelCase ) eval_loss += mc_loss.mean().item() eval_accuracy += tmp_eval_accuracy nb_eval_examples += input_ids.size(0 ) nb_eval_steps += 1 _snake_case = eval_loss / nb_eval_steps _snake_case = eval_accuracy / nb_eval_examples _snake_case = tr_loss / nb_tr_steps if args.do_train else None _snake_case = {'''eval_loss''': eval_loss, '''eval_accuracy''': eval_accuracy, '''train_loss''': train_loss} _snake_case = os.path.join(args.output_dir , '''eval_results.txt''' ) with open(__lowerCamelCase , '''w''' ) as writer: logger.info('''***** Eval results *****''' ) for key in sorted(result.keys() ): logger.info(''' %s = %s''' 
, __lowerCamelCase , str(result[key] ) ) writer.write('''%s = %s\n''' % (key, str(result[key] )) ) if __name__ == "__main__": main()
288
"""simple docstring""" import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import CLIPImageProcessor, CLIPProcessor @require_vision class lowerCAmelCase__ ( unittest.TestCase ): def lowercase ( self : Any ): _snake_case = tempfile.mkdtemp() # fmt: off _snake_case = ['''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>'''] # fmt: on _snake_case = dict(zip(_lowerCamelCase , range(len(_lowerCamelCase ) ) ) ) _snake_case = ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''', ''''''] _snake_case = {'''unk_token''': '''<unk>'''} _snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) _snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(_lowerCamelCase ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(_lowerCamelCase ) ) _snake_case = { '''do_resize''': True, '''size''': 20, '''do_center_crop''': True, '''crop_size''': 18, '''do_normalize''': True, '''image_mean''': [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3], '''image_std''': [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1], } _snake_case = os.path.join(self.tmpdirname , _lowerCamelCase ) with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp: json.dump(_lowerCamelCase , 
_lowerCamelCase ) def lowercase ( self : Tuple , **_lowerCamelCase : Any ): return CLIPTokenizer.from_pretrained(self.tmpdirname , **_lowerCamelCase ) def lowercase ( self : str , **_lowerCamelCase : Any ): return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **_lowerCamelCase ) def lowercase ( self : int , **_lowerCamelCase : Optional[int] ): return CLIPImageProcessor.from_pretrained(self.tmpdirname , **_lowerCamelCase ) def lowercase ( self : Union[str, Any] ): shutil.rmtree(self.tmpdirname ) def lowercase ( self : Any ): _snake_case = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] _snake_case = [Image.fromarray(np.moveaxis(_lowerCamelCase , 0 , -1 ) ) for x in image_inputs] return image_inputs def lowercase ( self : Optional[Any] ): _snake_case = self.get_tokenizer() _snake_case = self.get_rust_tokenizer() _snake_case = self.get_image_processor() _snake_case = CLIPProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase ) processor_slow.save_pretrained(self.tmpdirname ) _snake_case = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=_lowerCamelCase ) _snake_case = CLIPProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase ) processor_fast.save_pretrained(self.tmpdirname ) _snake_case = CLIPProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , _lowerCamelCase ) self.assertIsInstance(processor_fast.tokenizer , _lowerCamelCase ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , _lowerCamelCase ) 
self.assertIsInstance(processor_fast.image_processor , _lowerCamelCase ) def lowercase ( self : List[Any] ): _snake_case = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) _snake_case = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' ) _snake_case = self.get_image_processor(do_normalize=_lowerCamelCase , padding_value=1.0 ) _snake_case = CLIPProcessor.from_pretrained( self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_lowerCamelCase , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , _lowerCamelCase ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , _lowerCamelCase ) def lowercase ( self : int ): _snake_case = self.get_image_processor() _snake_case = self.get_tokenizer() _snake_case = CLIPProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase ) _snake_case = self.prepare_image_inputs() _snake_case = image_processor(_lowerCamelCase , return_tensors='''np''' ) _snake_case = processor(images=_lowerCamelCase , return_tensors='''np''' ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 ) def lowercase ( self : Any ): _snake_case = self.get_image_processor() _snake_case = self.get_tokenizer() _snake_case = CLIPProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase ) _snake_case = '''lower newer''' _snake_case = processor(text=_lowerCamelCase ) _snake_case = tokenizer(_lowerCamelCase ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def lowercase ( self : Any ): _snake_case = self.get_image_processor() _snake_case = self.get_tokenizer() _snake_case = 
CLIPProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase ) _snake_case = '''lower newer''' _snake_case = self.prepare_image_inputs() _snake_case = processor(text=_lowerCamelCase , images=_lowerCamelCase ) self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''pixel_values'''] ) # test if it raises when no input is passed with pytest.raises(_lowerCamelCase ): processor() def lowercase ( self : List[str] ): _snake_case = self.get_image_processor() _snake_case = self.get_tokenizer() _snake_case = CLIPProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase ) _snake_case = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] _snake_case = processor.batch_decode(_lowerCamelCase ) _snake_case = tokenizer.batch_decode(_lowerCamelCase ) self.assertListEqual(_lowerCamelCase , _lowerCamelCase ) def lowercase ( self : List[Any] ): _snake_case = self.get_image_processor() _snake_case = self.get_tokenizer() _snake_case = CLIPProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase ) _snake_case = '''lower newer''' _snake_case = self.prepare_image_inputs() _snake_case = processor(text=_lowerCamelCase , images=_lowerCamelCase ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
288
1
"""simple docstring""" import argparse import torch from transformers import BertForMaskedLM if __name__ == "__main__": UpperCAmelCase__ = argparse.ArgumentParser( description=( 'Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned' ' Distillation' ) ) parser.add_argument('--model_type', default='bert', choices=['bert']) parser.add_argument('--model_name', default='bert-base-uncased', type=str) parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_bert-base-uncased_0247911.pth', type=str) parser.add_argument('--vocab_transform', action='store_true') UpperCAmelCase__ = parser.parse_args() if args.model_type == "bert": UpperCAmelCase__ = BertForMaskedLM.from_pretrained(args.model_name) UpperCAmelCase__ = 'bert' else: raise ValueError('args.model_type should be "bert".') UpperCAmelCase__ = model.state_dict() UpperCAmelCase__ = {} for w in ["word_embeddings", "position_embeddings"]: UpperCAmelCase__ = state_dict[F"{prefix}.embeddings.{w}.weight"] for w in ["weight", "bias"]: UpperCAmelCase__ = state_dict[F"{prefix}.embeddings.LayerNorm.{w}"] UpperCAmelCase__ = 0 for teacher_idx in [0, 2, 4, 7, 9, 11]: for w in ["weight", "bias"]: UpperCAmelCase__ = state_dict[ F"{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}" ] UpperCAmelCase__ = state_dict[ F"{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}" ] UpperCAmelCase__ = state_dict[ F"{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}" ] UpperCAmelCase__ = state_dict[ F"{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}" ] UpperCAmelCase__ = state_dict[ F"{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}" ] UpperCAmelCase__ = state_dict[ F"{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}" ] UpperCAmelCase__ = state_dict[ F"{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}" ] UpperCAmelCase__ = state_dict[ F"{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}" ] std_idx += 1 
UpperCAmelCase__ = state_dict['cls.predictions.decoder.weight'] UpperCAmelCase__ = state_dict['cls.predictions.bias'] if args.vocab_transform: for w in ["weight", "bias"]: UpperCAmelCase__ = state_dict[F"cls.predictions.transform.dense.{w}"] UpperCAmelCase__ = state_dict[F"cls.predictions.transform.LayerNorm.{w}"] print(F"N layers selected for distillation: {std_idx}") print(F"Number of params transferred for distillation: {len(compressed_sd.keys())}") print(F"Save transferred checkpoint to {args.dump_checkpoint}.") torch.save(compressed_sd, args.dump_checkpoint)
288
"""simple docstring""" import os import time import numpy as np import onnxruntime as ort UpperCAmelCase__ = '1' UpperCAmelCase__ = '0' UpperCAmelCase__ = '1' UpperCAmelCase__ = ort.SessionOptions() UpperCAmelCase__ = ort.GraphOptimizationLevel.ORT_DISABLE_ALL print('Create inference session...') UpperCAmelCase__ = ['TensorrtExecutionProvider', 'CUDAExecutionProvider'] UpperCAmelCase__ = ort.InferenceSession('model.onnx', sess_options=sess_opt, providers=execution_provider) UpperCAmelCase__ = ort.RunOptions() UpperCAmelCase__ = 128 UpperCAmelCase__ = 1 UpperCAmelCase__ = np.ones((batch, sequence), dtype=np.intaa) UpperCAmelCase__ = np.ones((batch, sequence), dtype=np.intaa) UpperCAmelCase__ = np.ones((batch, sequence), dtype=np.intaa) print('Warm up phase...') sess.run( None, { sess.get_inputs()[0].name: input_ids, sess.get_inputs()[1].name: attention_mask, sess.get_inputs()[2].name: token_type_ids, }, run_options=run_opt, ) print('Start inference...') UpperCAmelCase__ = time.time() UpperCAmelCase__ = 2000 UpperCAmelCase__ = {} for iter in range(max_iters): UpperCAmelCase__ = sess.run( None, { sess.get_inputs()[0].name: input_ids, sess.get_inputs()[1].name: attention_mask, sess.get_inputs()[2].name: token_type_ids, }, run_options=run_opt, ) print('Average Inference Time = {:.3f} ms'.format((time.time() - start_time) * 1000 / max_iters))
288
1
"""simple docstring""" from math import sqrt def _UpperCAmelCase ( __lowerCamelCase : int = 1_00_00_00 ) -> int: _snake_case = 0 _snake_case = 0 _snake_case = 42 while num_cuboids <= limit: max_cuboid_size += 1 for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ): if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer(): num_cuboids += ( min(__lowerCamelCase , sum_shortest_sides // 2 ) - max(1 , sum_shortest_sides - max_cuboid_size ) + 1 ) return max_cuboid_size if __name__ == "__main__": print(F"{solution() = }")
288
"""simple docstring""" import logging from transformers.configuration_utils import PretrainedConfig UpperCAmelCase__ = logging.getLogger(__name__) class lowerCAmelCase__ ( A_ ): __a = """masked_bert""" def __init__( self : Union[str, Any] , _lowerCamelCase : Any=30522 , _lowerCamelCase : Union[str, Any]=768 , _lowerCamelCase : Tuple=12 , _lowerCamelCase : Any=12 , _lowerCamelCase : str=3072 , _lowerCamelCase : str="gelu" , _lowerCamelCase : int=0.1 , _lowerCamelCase : Optional[int]=0.1 , _lowerCamelCase : Dict=512 , _lowerCamelCase : List[Any]=2 , _lowerCamelCase : int=0.0_2 , _lowerCamelCase : Union[str, Any]=1e-12 , _lowerCamelCase : Union[str, Any]=0 , _lowerCamelCase : List[str]="topK" , _lowerCamelCase : Optional[Any]="constant" , _lowerCamelCase : Optional[Any]=0.0 , **_lowerCamelCase : str , ): super().__init__(pad_token_id=_lowerCamelCase , **_lowerCamelCase ) _snake_case = vocab_size _snake_case = hidden_size _snake_case = num_hidden_layers _snake_case = num_attention_heads _snake_case = hidden_act _snake_case = intermediate_size _snake_case = hidden_dropout_prob _snake_case = attention_probs_dropout_prob _snake_case = max_position_embeddings _snake_case = type_vocab_size _snake_case = initializer_range _snake_case = layer_norm_eps _snake_case = pruning_method _snake_case = mask_init _snake_case = mask_scale
288
1
"""simple docstring""" import json import os import re import shutil import tempfile import unittest from typing import Tuple from transformers import AddedToken, BatchEncoding, PerceiverTokenizer from transformers.utils import cached_property, is_tf_available, is_torch_available from ...test_tokenization_common import TokenizerTesterMixin if is_torch_available(): UpperCAmelCase__ = 'pt' elif is_tf_available(): UpperCAmelCase__ = 'tf' else: UpperCAmelCase__ = 'jax' class lowerCAmelCase__ ( A_ , unittest.TestCase ): __a = PerceiverTokenizer __a = False def lowercase ( self : Union[str, Any] ): super().setUp() _snake_case = PerceiverTokenizer() tokenizer.save_pretrained(self.tmpdirname ) @cached_property def lowercase ( self : Dict ): return PerceiverTokenizer.from_pretrained('''deepmind/language-perceiver''' ) def lowercase ( self : int , **_lowerCamelCase : Optional[Any] ): return self.tokenizer_class.from_pretrained(self.tmpdirname , **_lowerCamelCase ) def lowercase ( self : int , _lowerCamelCase : Dict , _lowerCamelCase : List[str]=False , _lowerCamelCase : Optional[int]=20 , _lowerCamelCase : List[str]=5 ): # XXX The default common tokenizer tests assume that every ID is decodable on its own. # This assumption is invalid for Perceiver because single bytes might not be # valid utf-8 (byte 128 for instance). # Here we're overriding the smallest possible method to provide # a clean sequence without making the same assumption. 
_snake_case = [] for i in range(len(_lowerCamelCase ) ): try: _snake_case = tokenizer.decode([i] , clean_up_tokenization_spaces=_lowerCamelCase ) except UnicodeDecodeError: pass toks.append((i, tok) ) _snake_case = list(filter(lambda _lowerCamelCase : re.match(R'''^[ a-zA-Z]+$''' , t[1] ) , _lowerCamelCase ) ) _snake_case = list(filter(lambda _lowerCamelCase : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=_lowerCamelCase ) , _lowerCamelCase ) ) if max_length is not None and len(_lowerCamelCase ) > max_length: _snake_case = toks[:max_length] if min_length is not None and len(_lowerCamelCase ) < min_length and len(_lowerCamelCase ) > 0: while len(_lowerCamelCase ) < min_length: _snake_case = toks + toks # toks_str = [t[1] for t in toks] _snake_case = [t[0] for t in toks] # Ensure consistency _snake_case = tokenizer.decode(_lowerCamelCase , clean_up_tokenization_spaces=_lowerCamelCase ) if " " not in output_txt and len(_lowerCamelCase ) > 1: _snake_case = ( tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=_lowerCamelCase ) + ''' ''' + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=_lowerCamelCase ) ) if with_prefix_space: _snake_case = ''' ''' + output_txt _snake_case = tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase ) return output_txt, output_ids def lowercase ( self : List[Any] ): _snake_case = self.perceiver_tokenizer _snake_case = '''Unicode €.''' _snake_case = tokenizer(_lowerCamelCase ) _snake_case = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5] self.assertEqual(encoded['''input_ids'''] , _lowerCamelCase ) # decoding _snake_case = tokenizer.decode(_lowerCamelCase ) self.assertEqual(_lowerCamelCase , '''[CLS]Unicode €.[SEP]''' ) _snake_case = tokenizer('''e è é ê ë''' ) _snake_case = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5] self.assertEqual(encoded['''input_ids'''] , _lowerCamelCase ) # decoding _snake_case = tokenizer.decode(_lowerCamelCase ) 
self.assertEqual(_lowerCamelCase , '''[CLS]e è é ê ë[SEP]''' ) # encode/decode, but with `encode` instead of `__call__` self.assertEqual(tokenizer.decode(tokenizer.encode('''e è é ê ë''' ) ) , '''[CLS]e è é ê ë[SEP]''' ) def lowercase ( self : List[str] ): _snake_case = self.perceiver_tokenizer _snake_case = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.'''] # fmt: off _snake_case = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0] # fmt: on _snake_case = tokenizer(_lowerCamelCase , padding=_lowerCamelCase , return_tensors=_lowerCamelCase ) self.assertIsInstance(_lowerCamelCase , _lowerCamelCase ) if FRAMEWORK != "jax": _snake_case = list(batch.input_ids.numpy()[0] ) else: _snake_case = list(batch.input_ids.tolist()[0] ) self.assertListEqual(_lowerCamelCase , _lowerCamelCase ) self.assertEqual((2, 38) , batch.input_ids.shape ) self.assertEqual((2, 38) , batch.attention_mask.shape ) def lowercase ( self : Optional[int] ): _snake_case = self.perceiver_tokenizer _snake_case = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.'''] _snake_case = tokenizer(_lowerCamelCase , padding=_lowerCamelCase , return_tensors=_lowerCamelCase ) # check if input_ids are returned and no decoder_input_ids self.assertIn('''input_ids''' , _lowerCamelCase ) self.assertIn('''attention_mask''' , _lowerCamelCase ) self.assertNotIn('''decoder_input_ids''' , _lowerCamelCase ) self.assertNotIn('''decoder_attention_mask''' , _lowerCamelCase ) def lowercase ( self : List[Any] ): _snake_case = self.perceiver_tokenizer _snake_case = [ '''Summary of the text.''', '''Another summary.''', ] _snake_case = tokenizer( text_target=_lowerCamelCase , max_length=32 , padding='''max_length''' , truncation=_lowerCamelCase , return_tensors=_lowerCamelCase ) self.assertEqual(32 , targets['''input_ids'''].shape[1] ) 
def test_save_and_load_tokenizer(self):
    """Round-trip a tokenizer through save_pretrained/from_pretrained and check ids survive."""
    # safety check on max_len default value so we are sure the test works
    tokenizers = self.get_tokenizers()
    for tokenizer in tokenizers:
        with self.subTest(f"{tokenizer.__class__.__name__}"):
            self.assertNotEqual(tokenizer.model_max_length, 42)

    # Now let's start the test
    tokenizers = self.get_tokenizers()
    for tokenizer in tokenizers:
        with self.subTest(f"{tokenizer.__class__.__name__}"):
            # Isolate this from the other tests because we save additional tokens/etc
            tmpdirname = tempfile.mkdtemp()

            sample_text = " He is very happy, UNwant\u00E9d,running"
            # NOTE(review): literal arguments were destroyed by obfuscation
            # (`add_special_tokens=_lowerCamelCase` is a NameError); restored to the
            # canonical upstream values — confirm against the original test file.
            before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
            tokenizer.save_pretrained(tmpdirname)

            after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
            after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
            self.assertListEqual(before_tokens, after_tokens)

            shutil.rmtree(tmpdirname)

    tokenizers = self.get_tokenizers(model_max_length=42)
    for tokenizer in tokenizers:
        with self.subTest(f"{tokenizer.__class__.__name__}"):
            # Isolate this from the other tests because we save additional tokens/etc
            tmpdirname = tempfile.mkdtemp()

            sample_text = " He is very happy, UNwant\u00E9d,running"
            tokenizer.add_tokens(["bim", "bambam"])
            additional_special_tokens = tokenizer.additional_special_tokens
            additional_special_tokens.append("new_additional_special_token")
            tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
            before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
            tokenizer.save_pretrained(tmpdirname)

            after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
            after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
            self.assertListEqual(before_tokens, after_tokens)
            # the added special token and the custom model_max_length must survive the round-trip
            self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens)
            self.assertEqual(after_tokenizer.model_max_length, 42)

            # kwargs passed to from_pretrained override the saved value
            tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
            self.assertEqual(tokenizer.model_max_length, 43)

            shutil.rmtree(tmpdirname)


def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
    """additional_special_tokens written to the json config files must be honored on load."""
    tokenizer_list = []
    if self.test_slow_tokenizer:
        tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))

    if self.test_rust_tokenizer:
        tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

    for tokenizer_class, tokenizer_utils in tokenizer_list:
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer_utils.save_pretrained(tmp_dir)

            with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                special_tokens_map = json.load(json_file)

            with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                tokenizer_config = json.load(json_file)

            added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125)]

            special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [
                "an_additional_special_token"
            ]
            tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [
                "an_additional_special_token"
            ]

            with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                json.dump(special_tokens_map, outfile)
            with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                json.dump(tokenizer_config, outfile)

            # the following checks allow us to verify that our test works as expected, i.e. that the
            # tokenizer takes into account the new value of additional_special_tokens given in the
            # "tokenizer_config.json" and "special_tokens_map.json" files
            tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
                tmp_dir,
            )
            self.assertIn(
                "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens
            )
            self.assertEqual(
                ["an_additional_special_token"],
                tokenizer_without_change_in_init.convert_ids_to_tokens(
                    tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"])
                ),
            )

            # Now we test that we can change the value of additional_special_tokens in the from_pretrained
            # NOTE(review): lstrip value restored from upstream — confirm.
            new_added_tokens = added_tokens_extra_ids + [
                AddedToken("a_new_additional_special_token", lstrip=True)
            ]
            tokenizer = tokenizer_class.from_pretrained(
                tmp_dir,
                additional_special_tokens=new_added_tokens,
            )

            self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens)
            self.assertEqual(
                ["a_new_additional_special_token"],
                tokenizer.convert_ids_to_tokens(
                    tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"])
                ),
            )


def test_decode_invalid_byte_id(self):
    """Byte id 178 has no UTF-8 decoding; the tokenizer must emit the replacement char."""
    tokenizer = self.perceiver_tokenizer
    self.assertEqual(tokenizer.decode([178]), "�")


# tokenizer can be instantiated without pretrained files: nothing to test here
def test_pretrained_model_lists(self):
    pass


# tokenizer does not have a vocabulary file
def test_get_vocab(self):
    pass


# inputs cannot be pretokenized for a byte-level tokenizer
def test_pretokenized_inputs(self):
    pass


# no vocab => conversion reversibility is not applicable
def test_conversion_reversible(self):
    pass


def test_convert_tokens_to_string_format(self):
    # The default common tokenizer tests uses invalid tokens for Perceiver that can only accept
    # one-character strings and special added tokens as tokens
    tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
    for tokenizer in tokenizers:
        with self.subTest(f"{tokenizer.__class__.__name__}"):
            tokens = ["[CLS]", "t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "s", "t", "[SEP]"]
            string = tokenizer.convert_tokens_to_string(tokens)
            self.assertIsInstance(string, str)
288
"""simple docstring""" import os import posixpath import uuid from dataclasses import dataclass from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union import numpy as np import pyarrow as pa import datasets from datasets.arrow_writer import ArrowWriter, ParquetWriter from datasets.config import MAX_SHARD_SIZE from datasets.filesystems import ( is_remote_filesystem, rename, ) from datasets.iterable_dataset import _BaseExamplesIterable from datasets.utils.py_utils import convert_file_size_to_int UpperCAmelCase__ = datasets.utils.logging.get_logger(__name__) if TYPE_CHECKING: import pyspark @dataclass class lowerCAmelCase__ ( datasets.BuilderConfig ): __a = None def _UpperCAmelCase ( __lowerCamelCase : "pyspark.sql.DataFrame" , __lowerCamelCase : List[int] , ) -> Optional[int]: import pyspark def generate_fn(): _snake_case = df.select('''*''' , pyspark.sql.functions.spark_partition_id().alias('''part_id''' ) ) for partition_id in partition_order: _snake_case = df_with_partition_id.select('''*''' ).where(f'''part_id = {partition_id}''' ).drop('''part_id''' ) _snake_case = partition_df.collect() _snake_case = 0 for row in rows: yield f'''{partition_id}_{row_id}''', row.asDict() row_id += 1 return generate_fn class lowerCAmelCase__ ( _BaseExamplesIterable ): def __init__( self : Optional[int] , _lowerCamelCase : "pyspark.sql.DataFrame" , _lowerCamelCase : List[Any]=None , ): _snake_case = df _snake_case = partition_order or range(self.df.rdd.getNumPartitions() ) _snake_case = _generate_iterable_examples(self.df , self.partition_order ) def __iter__( self : Optional[int] ): yield from self.generate_examples_fn() def lowercase ( self : Any , _lowerCamelCase : np.random.Generator ): _snake_case = list(range(self.df.rdd.getNumPartitions() ) ) generator.shuffle(_lowerCamelCase ) return SparkExamplesIterable(self.df , partition_order=_lowerCamelCase ) def lowercase ( self : List[Any] , _lowerCamelCase : int , _lowerCamelCase : int ): _snake_case = 
self.split_shard_indices_by_worker(_lowerCamelCase , _lowerCamelCase ) return SparkExamplesIterable(self.df , partition_order=_lowerCamelCase ) @property def lowercase ( self : List[str] ): return len(self.partition_order ) class lowerCAmelCase__ ( datasets.DatasetBuilder ): __a = SparkConfig def __init__( self : str , _lowerCamelCase : "pyspark.sql.DataFrame" , _lowerCamelCase : str = None , _lowerCamelCase : str = None , **_lowerCamelCase : List[str] , ): import pyspark _snake_case = pyspark.sql.SparkSession.builder.getOrCreate() _snake_case = df _snake_case = working_dir super().__init__( cache_dir=_lowerCamelCase , config_name=str(self.df.semanticHash() ) , **_lowerCamelCase , ) def lowercase ( self : str ): # Returns the path of the created file. def create_cache_and_write_probe(_lowerCamelCase : List[str] ): # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories # already exist. os.makedirs(self._cache_dir , exist_ok=_lowerCamelCase ) _snake_case = os.path.join(self._cache_dir , '''fs_test''' + uuid.uuida().hex ) # Opening the file in append mode will create a new file unless it already exists, in which case it will not # change the file contents. open(_lowerCamelCase , '''a''' ) return [probe_file] if self._spark.conf.get('''spark.master''' , '''''' ).startswith('''local''' ): return # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS # accessible to the driver. # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error. 
if self._cache_dir: _snake_case = ( self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(_lowerCamelCase ).collect() ) if os.path.isfile(probe[0] ): return raise ValueError( '''When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir''' ) def lowercase ( self : Dict ): return datasets.DatasetInfo(features=self.config.features ) def lowercase ( self : Union[str, Any] , _lowerCamelCase : datasets.download.download_manager.DownloadManager ): return [datasets.SplitGenerator(name=datasets.Split.TRAIN )] def lowercase ( self : Dict , _lowerCamelCase : List[Any] ): import pyspark def get_arrow_batch_size(_lowerCamelCase : List[Any] ): for batch in it: yield pa.RecordBatch.from_pydict({'''batch_bytes''': [batch.nbytes]} ) _snake_case = self.df.count() _snake_case = df_num_rows if df_num_rows <= 100 else 100 # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample. _snake_case = ( self.df.limit(_lowerCamelCase ) .repartition(1 ) .mapInArrow(_lowerCamelCase , '''batch_bytes: long''' ) .agg(pyspark.sql.functions.sum('''batch_bytes''' ).alias('''sample_bytes''' ) ) .collect()[0] .sample_bytes / sample_num_rows ) _snake_case = approx_bytes_per_row * df_num_rows if approx_total_size > max_shard_size: # Make sure there is at least one row per partition. _snake_case = min(_lowerCamelCase , int(approx_total_size / max_shard_size ) ) _snake_case = self.df.repartition(_lowerCamelCase ) def lowercase ( self : Dict , _lowerCamelCase : str , _lowerCamelCase : str , _lowerCamelCase : int , ): import pyspark _snake_case = ParquetWriter if file_format == '''parquet''' else ArrowWriter _snake_case = os.path.join(self._working_dir , os.path.basename(_lowerCamelCase ) ) if self._working_dir else fpath _snake_case = file_format == '''parquet''' # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to # pickling the SparkContext. 
_snake_case = self.config.features _snake_case = self._writer_batch_size _snake_case = self._fs.storage_options def write_arrow(_lowerCamelCase : Tuple ): # Within the same SparkContext, no two task attempts will share the same attempt ID. _snake_case = pyspark.TaskContext().taskAttemptId() _snake_case = next(_lowerCamelCase , _lowerCamelCase ) if first_batch is None: # Some partitions might not receive any data. return pa.RecordBatch.from_arrays( [[task_id], [0], [0]] , names=['''task_id''', '''num_examples''', '''num_bytes'''] , ) _snake_case = 0 _snake_case = writer_class( features=_lowerCamelCase , path=working_fpath.replace('''SSSSS''' , f'''{shard_id:05d}''' ).replace('''TTTTT''' , f'''{task_id:05d}''' ) , writer_batch_size=_lowerCamelCase , storage_options=_lowerCamelCase , embed_local_files=_lowerCamelCase , ) _snake_case = pa.Table.from_batches([first_batch] ) writer.write_table(_lowerCamelCase ) for batch in it: if max_shard_size is not None and writer._num_bytes >= max_shard_size: _snake_case , _snake_case = writer.finalize() writer.close() yield pa.RecordBatch.from_arrays( [[task_id], [num_examples], [num_bytes]] , names=['''task_id''', '''num_examples''', '''num_bytes'''] , ) shard_id += 1 _snake_case = writer_class( features=writer._features , path=working_fpath.replace('''SSSSS''' , f'''{shard_id:05d}''' ).replace('''TTTTT''' , f'''{task_id:05d}''' ) , writer_batch_size=_lowerCamelCase , storage_options=_lowerCamelCase , embed_local_files=_lowerCamelCase , ) _snake_case = pa.Table.from_batches([batch] ) writer.write_table(_lowerCamelCase ) if writer._num_bytes > 0: _snake_case , _snake_case = writer.finalize() writer.close() yield pa.RecordBatch.from_arrays( [[task_id], [num_examples], [num_bytes]] , names=['''task_id''', '''num_examples''', '''num_bytes'''] , ) if working_fpath != fpath: for file in os.listdir(os.path.dirname(_lowerCamelCase ) ): _snake_case = os.path.join(os.path.dirname(_lowerCamelCase ) , os.path.basename(_lowerCamelCase ) ) 
shutil.move(_lowerCamelCase , _lowerCamelCase ) _snake_case = ( self.df.mapInArrow(_lowerCamelCase , '''task_id: long, num_examples: long, num_bytes: long''' ) .groupBy('''task_id''' ) .agg( pyspark.sql.functions.sum('''num_examples''' ).alias('''total_num_examples''' ) , pyspark.sql.functions.sum('''num_bytes''' ).alias('''total_num_bytes''' ) , pyspark.sql.functions.count('''num_bytes''' ).alias('''num_shards''' ) , pyspark.sql.functions.collect_list('''num_examples''' ).alias('''shard_lengths''' ) , ) .collect() ) for row in stats: yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths) def lowercase ( self : int , _lowerCamelCase : "datasets.SplitGenerator" , _lowerCamelCase : str = "arrow" , _lowerCamelCase : Optional[Union[str, int]] = None , _lowerCamelCase : Optional[int] = None , **_lowerCamelCase : List[Any] , ): self._validate_cache_dir() _snake_case = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE ) self._repartition_df_if_needed(_lowerCamelCase ) _snake_case = not is_remote_filesystem(self._fs ) _snake_case = os.path.join if is_local else posixpath.join _snake_case = '''-TTTTT-SSSSS-of-NNNNN''' _snake_case = f'''{self.name}-{split_generator.name}{SUFFIX}.{file_format}''' _snake_case = path_join(self._output_dir , _lowerCamelCase ) _snake_case = 0 _snake_case = 0 _snake_case = 0 _snake_case = [] _snake_case = [] for task_id, content in self._prepare_split_single(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): ( ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ) = content if num_bytes > 0: total_num_examples += num_examples total_num_bytes += num_bytes total_shards += num_shards task_id_and_num_shards.append((task_id, num_shards) ) all_shard_lengths.extend(_lowerCamelCase ) _snake_case = total_num_examples _snake_case = total_num_bytes # should rename everything at the end logger.debug(f'''Renaming {total_shards} shards.''' ) if total_shards > 1: _snake_case = 
all_shard_lengths # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a # pickling error due to pickling the SparkContext. _snake_case = self._fs # use the -SSSSS-of-NNNNN pattern def _rename_shard( _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : int , ): rename( _lowerCamelCase , fpath.replace('''SSSSS''' , f'''{shard_id:05d}''' ).replace('''TTTTT''' , f'''{task_id:05d}''' ) , fpath.replace('''TTTTT-SSSSS''' , f'''{global_shard_id:05d}''' ).replace('''NNNNN''' , f'''{total_shards:05d}''' ) , ) _snake_case = [] _snake_case = 0 for i in range(len(_lowerCamelCase ) ): _snake_case , _snake_case = task_id_and_num_shards[i] for shard_id in range(_lowerCamelCase ): args.append([task_id, shard_id, global_shard_id] ) global_shard_id += 1 self._spark.sparkContext.parallelize(_lowerCamelCase , len(_lowerCamelCase ) ).map(lambda _lowerCamelCase : _rename_shard(*_lowerCamelCase ) ).collect() else: # don't use any pattern _snake_case = 0 _snake_case = task_id_and_num_shards[0][0] self._rename( fpath.replace('''SSSSS''' , f'''{shard_id:05d}''' ).replace('''TTTTT''' , f'''{task_id:05d}''' ) , fpath.replace(_lowerCamelCase , '''''' ) , ) def lowercase ( self : List[str] , _lowerCamelCase : "datasets.SplitGenerator" , ): return SparkExamplesIterable(self.df )
288
1
"""simple docstring""" import os import shutil import sys import tempfile import unittest from pathlib import Path import pytest import transformers from transformers import ( BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, AutoTokenizer, BertConfig, BertTokenizer, BertTokenizerFast, CTRLTokenizer, GPTaTokenizer, GPTaTokenizerFast, PreTrainedTokenizerFast, RobertaTokenizer, RobertaTokenizerFast, is_tokenizers_available, ) from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig from transformers.models.auto.tokenization_auto import ( TOKENIZER_MAPPING, get_tokenizer_config, tokenizer_class_from_name, ) from transformers.models.roberta.configuration_roberta import RobertaConfig from transformers.testing_utils import ( DUMMY_DIFF_TOKENIZER_IDENTIFIER, DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, RequestCounter, require_tokenizers, slow, ) sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils')) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_tokenization import CustomTokenizer # noqa E402 if is_tokenizers_available(): from test_module.custom_tokenization_fast import CustomTokenizerFast class lowerCAmelCase__ ( unittest.TestCase ): def lowercase ( self : List[str] ): _snake_case = 0 @slow def lowercase ( self : Tuple ): for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x): _snake_case = AutoTokenizer.from_pretrained(_lowerCamelCase ) self.assertIsNotNone(_lowerCamelCase ) self.assertIsInstance(_lowerCamelCase , (BertTokenizer, BertTokenizerFast) ) self.assertGreater(len(_lowerCamelCase ) , 0 ) for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys(): _snake_case = AutoTokenizer.from_pretrained(_lowerCamelCase ) self.assertIsNotNone(_lowerCamelCase ) self.assertIsInstance(_lowerCamelCase , (GPTaTokenizer, GPTaTokenizerFast) ) self.assertGreater(len(_lowerCamelCase ) , 0 ) def lowercase ( self : Union[str, 
Any] ): _snake_case = AutoTokenizer.from_pretrained(_lowerCamelCase ) self.assertIsInstance(_lowerCamelCase , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(tokenizer.vocab_size , 12 ) def lowercase ( self : int ): _snake_case = AutoTokenizer.from_pretrained(_lowerCamelCase ) self.assertIsInstance(_lowerCamelCase , (RobertaTokenizer, RobertaTokenizerFast) ) self.assertEqual(tokenizer.vocab_size , 20 ) def lowercase ( self : Tuple ): _snake_case = AutoConfig.from_pretrained(_lowerCamelCase ) self.assertIsInstance(_lowerCamelCase , _lowerCamelCase ) # Check that tokenizer_type ≠ model_type _snake_case = AutoTokenizer.from_pretrained(_lowerCamelCase , config=_lowerCamelCase ) self.assertIsInstance(_lowerCamelCase , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(tokenizer.vocab_size , 12 ) def lowercase ( self : Tuple ): with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy('''./tests/fixtures/vocab.txt''' , os.path.join(_lowerCamelCase , '''vocab.txt''' ) ) _snake_case = AutoTokenizer.from_pretrained(_lowerCamelCase , tokenizer_type='''bert''' , use_fast=_lowerCamelCase ) self.assertIsInstance(_lowerCamelCase , _lowerCamelCase ) with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy('''./tests/fixtures/vocab.json''' , os.path.join(_lowerCamelCase , '''vocab.json''' ) ) shutil.copy('''./tests/fixtures/merges.txt''' , os.path.join(_lowerCamelCase , '''merges.txt''' ) ) _snake_case = AutoTokenizer.from_pretrained(_lowerCamelCase , tokenizer_type='''gpt2''' , use_fast=_lowerCamelCase ) self.assertIsInstance(_lowerCamelCase , _lowerCamelCase ) @require_tokenizers def lowercase ( self : Dict ): with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy('''./tests/fixtures/vocab.txt''' , os.path.join(_lowerCamelCase , '''vocab.txt''' ) ) _snake_case = AutoTokenizer.from_pretrained(_lowerCamelCase , tokenizer_type='''bert''' ) self.assertIsInstance(_lowerCamelCase , _lowerCamelCase ) with tempfile.TemporaryDirectory() as tmp_dir: 
shutil.copy('''./tests/fixtures/vocab.json''' , os.path.join(_lowerCamelCase , '''vocab.json''' ) ) shutil.copy('''./tests/fixtures/merges.txt''' , os.path.join(_lowerCamelCase , '''merges.txt''' ) ) _snake_case = AutoTokenizer.from_pretrained(_lowerCamelCase , tokenizer_type='''gpt2''' ) self.assertIsInstance(_lowerCamelCase , _lowerCamelCase ) def lowercase ( self : Union[str, Any] ): with pytest.raises(_lowerCamelCase ): AutoTokenizer.from_pretrained('''./''' , tokenizer_type='''xxx''' ) @require_tokenizers def lowercase ( self : Optional[Any] ): for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]: _snake_case = tokenizer_class.from_pretrained('''wietsedv/bert-base-dutch-cased''' ) self.assertIsInstance(_lowerCamelCase , (BertTokenizer, BertTokenizerFast) ) if isinstance(_lowerCamelCase , _lowerCamelCase ): self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , _lowerCamelCase ) else: self.assertEqual(tokenizer.do_lower_case , _lowerCamelCase ) self.assertEqual(tokenizer.model_max_length , 512 ) @require_tokenizers def lowercase ( self : List[Any] ): for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]: with self.assertRaisesRegex( _lowerCamelCase , '''julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier''' , ): _snake_case = tokenizer_class.from_pretrained('''julien-c/herlolip-not-exists''' ) def lowercase ( self : Dict ): # tests: https://github.com/huggingface/transformers/pull/13251 # 1. models with `-`, e.g. xlm-roberta -> xlm_roberta # 2. 
models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai _snake_case = TOKENIZER_MAPPING.values() _snake_case = [] for slow_tok, fast_tok in tokenizers: if slow_tok is not None: tokenizer_names.append(slow_tok.__name__ ) if fast_tok is not None: tokenizer_names.append(fast_tok.__name__ ) for tokenizer_name in tokenizer_names: # must find the right class tokenizer_class_from_name(_lowerCamelCase ) @require_tokenizers def lowercase ( self : Dict ): self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' , use_fast=_lowerCamelCase ) , _lowerCamelCase ) self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' ) , _lowerCamelCase ) @require_tokenizers def lowercase ( self : Optional[int] ): _snake_case = AutoTokenizer.from_pretrained('''distilbert-base-uncased''' , do_lower_case=_lowerCamelCase ) _snake_case = '''Hello, world. How are you?''' _snake_case = tokenizer.tokenize(_lowerCamelCase ) self.assertEqual('''[UNK]''' , tokens[0] ) _snake_case = AutoTokenizer.from_pretrained('''microsoft/mpnet-base''' , do_lower_case=_lowerCamelCase ) _snake_case = tokenizer.tokenize(_lowerCamelCase ) self.assertEqual('''[UNK]''' , tokens[0] ) @require_tokenizers def lowercase ( self : Optional[Any] ): _snake_case = AutoTokenizer.from_pretrained('''robot-test/dummy-tokenizer-fast-with-model-config''' ) self.assertEqual(type(_lowerCamelCase ) , _lowerCamelCase ) self.assertEqual(tokenizer.model_max_length , 512 ) self.assertEqual(tokenizer.vocab_size , 30000 ) self.assertEqual(tokenizer.unk_token , '''[UNK]''' ) self.assertEqual(tokenizer.padding_side , '''right''' ) self.assertEqual(tokenizer.truncation_side , '''right''' ) def lowercase ( self : Optional[int] ): _snake_case = AutoTokenizer.from_pretrained(_lowerCamelCase ) self.assertIsInstance(_lowerCamelCase , (BertTokenizer, BertTokenizerFast) ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(_lowerCamelCase ) _snake_case = 
AutoTokenizer.from_pretrained(_lowerCamelCase ) self.assertIsInstance(_lowerCamelCase , tokenizer.__class__ ) self.assertEqual(tokenizera.vocab_size , 12 ) def lowercase ( self : Union[str, Any] ): _snake_case = AutoTokenizer.from_pretrained('''ctrl''' ) # There is no fast CTRL so this always gives us a slow tokenizer. self.assertIsInstance(_lowerCamelCase , _lowerCamelCase ) def lowercase ( self : Union[str, Any] ): # Check we can load the tokenizer config of an online model. _snake_case = get_tokenizer_config('''bert-base-cased''' ) _snake_case = config.pop('''_commit_hash''' , _lowerCamelCase ) # If we ever update bert-base-cased tokenizer config, this dict here will need to be updated. self.assertEqual(_lowerCamelCase , {'''do_lower_case''': False} ) # This model does not have a tokenizer_config so we get back an empty dict. _snake_case = get_tokenizer_config(_lowerCamelCase ) self.assertDictEqual(_lowerCamelCase , {} ) # A tokenizer saved with `save_pretrained` always creates a tokenizer config. _snake_case = AutoTokenizer.from_pretrained(_lowerCamelCase ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(_lowerCamelCase ) _snake_case = get_tokenizer_config(_lowerCamelCase ) # Check the class of the tokenizer was properly saved (note that it always saves the slow class). 
self.assertEqual(config['''tokenizer_class'''] , '''BertTokenizer''' ) def lowercase ( self : List[str] ): try: AutoConfig.register('''custom''' , _lowerCamelCase ) AutoTokenizer.register(_lowerCamelCase , slow_tokenizer_class=_lowerCamelCase ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(_lowerCamelCase ): AutoTokenizer.register(_lowerCamelCase , slow_tokenizer_class=_lowerCamelCase ) _snake_case = CustomTokenizer.from_pretrained(_lowerCamelCase ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(_lowerCamelCase ) _snake_case = AutoTokenizer.from_pretrained(_lowerCamelCase ) self.assertIsInstance(_lowerCamelCase , _lowerCamelCase ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] @require_tokenizers def lowercase ( self : List[Any] ): try: AutoConfig.register('''custom''' , _lowerCamelCase ) # Can register in two steps AutoTokenizer.register(_lowerCamelCase , slow_tokenizer_class=_lowerCamelCase ) self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) ) AutoTokenizer.register(_lowerCamelCase , fast_tokenizer_class=_lowerCamelCase ) self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) ) del TOKENIZER_MAPPING._extra_content[CustomConfig] # Can register in one step AutoTokenizer.register( _lowerCamelCase , slow_tokenizer_class=_lowerCamelCase , fast_tokenizer_class=_lowerCamelCase ) self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(_lowerCamelCase ): AutoTokenizer.register(_lowerCamelCase , fast_tokenizer_class=_lowerCamelCase ) # We pass through a bert tokenizer fast cause there is no converter slow to fast for our new 
toknizer # and that model does not have a tokenizer.json with tempfile.TemporaryDirectory() as tmp_dir: _snake_case = BertTokenizerFast.from_pretrained(_lowerCamelCase ) bert_tokenizer.save_pretrained(_lowerCamelCase ) _snake_case = CustomTokenizerFast.from_pretrained(_lowerCamelCase ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(_lowerCamelCase ) _snake_case = AutoTokenizer.from_pretrained(_lowerCamelCase ) self.assertIsInstance(_lowerCamelCase , _lowerCamelCase ) _snake_case = AutoTokenizer.from_pretrained(_lowerCamelCase , use_fast=_lowerCamelCase ) self.assertIsInstance(_lowerCamelCase , _lowerCamelCase ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] def lowercase ( self : str ): # If remote code is not set, we will time out when asking whether to load the model. with self.assertRaises(_lowerCamelCase ): _snake_case = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' ) # If remote code is disabled, we can't load this config. with self.assertRaises(_lowerCamelCase ): _snake_case = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_lowerCamelCase ) _snake_case = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_lowerCamelCase ) self.assertTrue(tokenizer.special_attribute_present ) # Test tokenizer can be reloaded. 
with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(_lowerCamelCase ) _snake_case = AutoTokenizer.from_pretrained(_lowerCamelCase , trust_remote_code=_lowerCamelCase ) self.assertTrue(reloaded_tokenizer.special_attribute_present ) if is_tokenizers_available(): self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' ) self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizerFast''' ) # Test we can also load the slow version _snake_case = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_lowerCamelCase , use_fast=_lowerCamelCase ) self.assertTrue(tokenizer.special_attribute_present ) self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' ) # Test tokenizer can be reloaded. with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(_lowerCamelCase ) _snake_case = AutoTokenizer.from_pretrained(_lowerCamelCase , trust_remote_code=_lowerCamelCase , use_fast=_lowerCamelCase ) self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''' ) self.assertTrue(reloaded_tokenizer.special_attribute_present ) else: self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' ) self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''' ) @require_tokenizers def lowercase ( self : int ): class lowerCAmelCase__ ( A_ ): __a = False class lowerCAmelCase__ ( A_ ): __a = NewTokenizer __a = False try: AutoConfig.register('''custom''' , _lowerCamelCase ) AutoTokenizer.register(_lowerCamelCase , slow_tokenizer_class=_lowerCamelCase ) AutoTokenizer.register(_lowerCamelCase , fast_tokenizer_class=_lowerCamelCase ) # If remote code is not set, the default is to use local _snake_case = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' ) self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' ) self.assertFalse(tokenizer.special_attribute_present ) _snake_case = 
AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , use_fast=_lowerCamelCase ) self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' ) self.assertFalse(tokenizer.special_attribute_present ) # If remote code is disabled, we load the local one. _snake_case = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_lowerCamelCase ) self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' ) self.assertFalse(tokenizer.special_attribute_present ) _snake_case = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_lowerCamelCase , use_fast=_lowerCamelCase ) self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' ) self.assertFalse(tokenizer.special_attribute_present ) # If remote is enabled, we load from the Hub _snake_case = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_lowerCamelCase ) self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' ) self.assertTrue(tokenizer.special_attribute_present ) _snake_case = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_lowerCamelCase , use_fast=_lowerCamelCase ) self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' ) self.assertTrue(tokenizer.special_attribute_present ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] def lowercase ( self : str ): _snake_case = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=_lowerCamelCase ) self.assertTrue(tokenizer.special_attribute_present ) if is_tokenizers_available(): self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' ) # Test we can also load the slow version _snake_case = 
AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=_lowerCamelCase , use_fast=_lowerCamelCase ) self.assertTrue(tokenizer.special_attribute_present ) self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' ) else: self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' ) def lowercase ( self : Optional[int] ): with self.assertRaisesRegex( _lowerCamelCase , '''bert-base is not a local folder and is not a valid model identifier''' ): _snake_case = AutoTokenizer.from_pretrained('''bert-base''' ) def lowercase ( self : str ): with self.assertRaisesRegex( _lowerCamelCase , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ): _snake_case = AutoTokenizer.from_pretrained(_lowerCamelCase , revision='''aaaaaa''' ) def lowercase ( self : Tuple ): # Make sure we have cached the tokenizer. _snake_case = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' ) with RequestCounter() as counter: _snake_case = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' ) self.assertEqual(counter.get_request_count , 0 ) self.assertEqual(counter.head_request_count , 1 ) self.assertEqual(counter.other_request_count , 0 )
288
"""simple docstring""" from math import sqrt def _UpperCAmelCase ( __lowerCamelCase : int = 1_00_00_00 ) -> int: _snake_case = 0 _snake_case = 0 _snake_case = 42 while num_cuboids <= limit: max_cuboid_size += 1 for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ): if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer(): num_cuboids += ( min(__lowerCamelCase , sum_shortest_sides // 2 ) - max(1 , sum_shortest_sides - max_cuboid_size ) + 1 ) return max_cuboid_size if __name__ == "__main__": print(F"{solution() = }")
288
1
"""Convert fairseq BART checkpoints (bart.large, bart.large.mnli, bart.large.cnn,
or a local bart_xsum/model.pt) into Hugging Face Transformers checkpoints."""
import argparse
import os
from pathlib import Path

import fairseq
import torch
from packaging import version
from torch import nn

from transformers import (
    BartConfig,
    BartForConditionalGeneration,
    BartForSequenceClassification,
    BartModel,
    BartTokenizer,
)
from transformers.utils import logging

FAIRSEQ_MODELS = ['bart.large', 'bart.large.mnli', 'bart.large.cnn', 'bart_xsum/model.pt']
extra_arch = {'bart.large': BartModel, 'bart.large.mnli': BartForSequenceClassification}
if version.parse(fairseq.__version__) < version.parse('0.9.0'):
    raise Exception('requires fairseq >= 0.9.0')

logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# Sample sentence used to verify tokenizer parity between fairseq and HF.
SAMPLE_TEXT = ' Hello world! cécé herlolip'

mnli_rename_keys = [
    ('model.classification_heads.mnli.dense.weight', 'classification_head.dense.weight'),
    ('model.classification_heads.mnli.dense.bias', 'classification_head.dense.bias'),
    ('model.classification_heads.mnli.out_proj.weight', 'classification_head.out_proj.weight'),
    ('model.classification_heads.mnli.out_proj.bias', 'classification_head.out_proj.bias'),
]


def remove_ignore_keys_(state_dict):
    """Drop fairseq bookkeeping keys that have no HF counterpart (in place)."""
    ignore_keys = [
        '''encoder.version''',
        '''decoder.version''',
        '''model.encoder.version''',
        '''model.decoder.version''',
        '''_float_tensor''',
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)  # pop with default: key may be absent


def rename_key(dct, old, new):
    """Move ``dct[old]`` to ``dct[new]`` (in place)."""
    val = dct.pop(old)
    dct[new] = val


def load_xsum_checkpoint(checkpoint_path):
    """Load a local xsum checkpoint into a fairseq bart.large.cnn hub interface."""
    sd = torch.load(checkpoint_path, map_location='''cpu''')
    hub_interface = torch.hub.load('''pytorch/fairseq''', '''bart.large.cnn''').eval()
    hub_interface.model.load_state_dict(sd['''model'''])
    return hub_interface


def make_linear_from_emb(emb):
    """Build a bias-free Linear layer that shares weights with ``emb``."""
    vocab_size, emb_size = emb.weight.shape
    # bias=False: the lm_head tied to the embedding matrix has no bias term.
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


@torch.no_grad()
def convert_bart_checkpoint(checkpoint_path, pytorch_dump_folder_path, hf_checkpoint_name=None):
    """Copy/paste/tweak a fairseq BART checkpoint into our API, verify the
    outputs match, and save it under ``pytorch_dump_folder_path``.

    :param checkpoint_path: fairseq hub name (e.g. ``bart.large``) or local path.
    :param pytorch_dump_folder_path: output directory for the HF checkpoint.
    :param hf_checkpoint_name: HF config name; derived from ``checkpoint_path`` if None.
    """
    if not os.path.exists(checkpoint_path):
        bart = torch.hub.load('''pytorch/fairseq''', checkpoint_path).eval()
    else:
        bart = load_xsum_checkpoint(checkpoint_path)
    bart.model.upgrade_state_dict(bart.model.state_dict())
    if hf_checkpoint_name is None:
        hf_checkpoint_name = checkpoint_path.replace('''.''', '''-''')
    config = BartConfig.from_pretrained(hf_checkpoint_name)
    # Sanity check: both tokenizers must agree on the sample sentence.
    tokens = bart.encode(SAMPLE_TEXT).unsqueeze(0)
    tokensa = BartTokenizer.from_pretrained(hf_checkpoint_name).encode(
        SAMPLE_TEXT, return_tensors='''pt'''
    ).unsqueeze(0)
    if not torch.eq(tokens, tokensa).all():
        raise ValueError(
            f'''converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokensa}'''
        )
    if checkpoint_path == "bart.large.mnli":
        state_dict = bart.state_dict()
        remove_ignore_keys_(state_dict)
        # NOTE(review): reconstructed key — tie the shared embedding to the
        # decoder embedding as in the upstream conversion script.
        state_dict['''model.shared.weight'''] = state_dict['''model.decoder.embed_tokens.weight''']
        for src, dest in mnli_rename_keys:
            rename_key(state_dict, src, dest)
        model = BartForSequenceClassification(config).eval()
        model.load_state_dict(state_dict)
        fairseq_output = bart.predict('''mnli''', tokens, return_logits=True)
        new_model_outputs = model(tokens)[0]  # logits
    else:  # no classification heads to worry about
        state_dict = bart.model.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict['''shared.weight'''] = state_dict['''decoder.embed_tokens.weight''']
        fairseq_output = bart.extract_features(tokens)
        if hf_checkpoint_name == "facebook/bart-large":
            model = BartModel(config).eval()
            model.load_state_dict(state_dict)
            new_model_outputs = model(tokens).model[0]
        else:
            model = BartForConditionalGeneration(config).eval()  # an existing summarization ckpt
            model.model.load_state_dict(state_dict)
            if hasattr(model, '''lm_head'''):
                model.lm_head = make_linear_from_emb(model.model.shared)
            new_model_outputs = model.model(tokens)[0]
    # Check results
    if fairseq_output.shape != new_model_outputs.shape:
        raise ValueError(
            f'''`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}'''
        )
    if (fairseq_output != new_model_outputs).any().item():
        raise ValueError('''Some values in `fairseq_output` are different from `new_model_outputs`''')
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        'fairseq_path', type=str, help='bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'
    )
    parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument(
        '--hf_config', default=None, type=str, help='Which huggingface architecture to use: bart-large-xsum'
    )
    args = parser.parse_args()
    convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
288
"""Convert DeiT distilled checkpoints from the timm library into Hugging Face
Transformers checkpoints, verifying logits parity along the way."""
import argparse
import json
from pathlib import Path

import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging

logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def create_rename_keys(config, base_model=False):
    """Build (timm_key, hf_key) pairs for renaming the timm state dict.

    :param config: DeiTConfig giving the number of layers.
    :param base_model: if True, strip the ``deit.`` prefix and map pooler keys
        instead of the classification heads.
    """
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''deit.encoder.layer.{i}.layernorm_before.weight'''))
        rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''deit.encoder.layer.{i}.layernorm_before.bias'''))
        rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''deit.encoder.layer.{i}.attention.output.dense.weight'''))
        rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''deit.encoder.layer.{i}.attention.output.dense.bias'''))
        rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''deit.encoder.layer.{i}.layernorm_after.weight'''))
        rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''deit.encoder.layer.{i}.layernorm_after.bias'''))
        rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''deit.encoder.layer.{i}.intermediate.dense.weight'''))
        rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''deit.encoder.layer.{i}.intermediate.dense.bias'''))
        rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''deit.encoder.layer.{i}.output.dense.weight'''))
        rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''deit.encoder.layer.{i}.output.dense.bias'''))
    # projection layer + position embeddings
    rename_keys.extend(
        [
            ('''cls_token''', '''deit.embeddings.cls_token'''),
            ('''dist_token''', '''deit.embeddings.distillation_token'''),
            ('''patch_embed.proj.weight''', '''deit.embeddings.patch_embeddings.projection.weight'''),
            ('''patch_embed.proj.bias''', '''deit.embeddings.patch_embeddings.projection.bias'''),
            ('''pos_embed''', '''deit.embeddings.position_embeddings'''),
        ]
    )
    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ('''norm.weight''', '''layernorm.weight'''),
                ('''norm.bias''', '''layernorm.bias'''),
                ('''pre_logits.fc.weight''', '''pooler.dense.weight'''),
                ('''pre_logits.fc.bias''', '''pooler.dense.bias'''),
            ]
        )
        # if just the base model, we should remove "deit" from all keys that start with "deit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith('''deit''') else pair for pair in rename_keys]
    else:
        # layernorm + classification heads
        rename_keys.extend(
            [
                ('''norm.weight''', '''deit.layernorm.weight'''),
                ('''norm.bias''', '''deit.layernorm.bias'''),
                ('''head.weight''', '''cls_classifier.weight'''),
                ('''head.bias''', '''cls_classifier.bias'''),
                ('''head_dist.weight''', '''distillation_classifier.weight'''),
                ('''head_dist.bias''', '''distillation_classifier.bias'''),
            ]
        )
    return rename_keys


def read_in_q_k_v(state_dict, config, base_model=False):
    """Split each timm fused qkv matrix into separate q/k/v entries (in place).

    NOTE(review): target key names reconstructed from the upstream conversion
    script (DeiT self-attention uses ``attention.attention.{query,key,value}``).
    """
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ''''''
        else:
            prefix = '''deit.'''
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''')
        in_proj_bias = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.query.weight'''] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.query.bias'''] = in_proj_bias[: config.hidden_size]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.key.weight'''] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.key.bias'''] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.value.weight'''] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.value.bias'''] = in_proj_bias[-config.hidden_size :]


def rename_key(dct, old, new):
    """Move ``dct[old]`` to ``dct[new]`` (in place)."""
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    """Fetch the standard COCO cats test image used for output verification."""
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_deit_checkpoint(deit_name, pytorch_dump_folder_path):
    """Copy/paste/tweak a timm DeiT checkpoint into our API and save it.

    :param deit_name: timm model name, e.g. ``vit_deit_base_distilled_patch16_224``.
    :param pytorch_dump_folder_path: output directory for model + processor.
    """
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 10_00
    repo_id = '''huggingface/label-files'''
    filename = '''imagenet-1k-id2label.json'''
    idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type='''dataset'''), '''r'''))
    idalabel = {int(k): v for k, v in idalabel.items()}
    config.idalabel = idalabel
    config.labelaid = {v: k for k, v in idalabel.items()}
    config.patch_size = int(deit_name[-6:-4])
    config.image_size = int(deit_name[-3:])
    # size of the architecture
    if deit_name[9:].startswith('''tiny'''):
        config.hidden_size = 1_92
        config.intermediate_size = 7_68
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith('''small'''):
        config.hidden_size = 3_84
        config.intermediate_size = 15_36
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith('''base'''):
        pass  # DeiTConfig defaults already describe the base architecture
    elif deit_name[4:].startswith('''large'''):
        config.hidden_size = 10_24
        config.intermediate_size = 40_96
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    # load original model from timm
    timm_model = timm.create_model(deit_name, pretrained=True)
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config).eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image, prepared by DeiTImageProcessor
    size = int(
        (2_56 / 2_24) * config.image_size
    )  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size, crop_size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors='''pt''')
    pixel_values = encoding['''pixel_values''']
    outputs = model(pixel_values)
    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1E-3)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f'''Saving model {deit_name} to {pytorch_dump_folder_path}''')
    model.save_pretrained(pytorch_dump_folder_path)
    print(f'''Saving image processor to {pytorch_dump_folder_path}''')
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--deit_name',
        default='vit_deit_base_distilled_patch16_224',
        type=str,
        help='Name of the DeiT timm model you\'d like to convert.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
    )
    args = parser.parse_args()
    convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
288
1
"""simple docstring""" from __future__ import annotations from bisect import bisect_left from functools import total_ordering from heapq import merge @total_ordering class lowerCAmelCase__ ( A_ ): def __lt__( self : Any , _lowerCamelCase : int ): return self[-1] < other[-1] def __eq__( self : int , _lowerCamelCase : Optional[Any] ): return self[-1] == other[-1] def _UpperCAmelCase ( __lowerCamelCase : list ) -> list: _snake_case = [] # sort into stacks for element in collection: _snake_case = Stack([element] ) _snake_case = bisect_left(__lowerCamelCase , __lowerCamelCase ) if i != len(__lowerCamelCase ): stacks[i].append(__lowerCamelCase ) else: stacks.append(__lowerCamelCase ) # use a heap-based merge to merge stack efficiently _snake_case = merge(*(reversed(__lowerCamelCase ) for stack in stacks) ) return collection if __name__ == "__main__": UpperCAmelCase__ = input('Enter numbers separated by a comma:\n').strip() UpperCAmelCase__ = [int(item) for item in user_input.split(',')] print(patience_sort(unsorted))
288
"""Tests for datasets' DownloadManager: download, extract, archive/file iteration."""
import json
import os
from pathlib import Path

import pytest

from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename

URL = 'http://www.mocksite.com/file1.txt'
CONTENT = '"text": ["foo", "foo"]'
HASH = '6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8'


class lowerCAmelCase__:
    """Minimal stand-in for a ``requests`` response object."""

    status_code = 200
    headers = {"Content-Length": "100"}
    cookies = {}

    def iter_content(self, **kwargs):
        # Single chunk containing the whole mock payload.
        return [bytes(CONTENT, '''utf-8''')]


def mock_request(*args, **kwargs):
    """Replacement for ``requests.request`` returning the mock response."""
    return lowerCAmelCase__()


@pytest.mark.parametrize('''urls_type''', [str, list, dict])
def test_download_manager_download(urls_type, tmp_path, monkeypatch):
    """Download URL(s) of each container type; verify paths, content, metadata."""
    import requests

    monkeypatch.setattr(requests, '''request''', mock_request)
    url = URL
    if issubclass(urls_type, str):
        urls = url
    elif issubclass(urls_type, list):
        urls = [url]
    elif issubclass(urls_type, dict):
        urls = {'''train''': url}
    dataset_name = '''dummy'''
    cache_subdir = '''downloads'''
    cache_dir_root = tmp_path
    download_config = DownloadConfig(
        cache_dir=os.path.join(cache_dir_root, cache_subdir),
        use_etag=False,
    )
    dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
    downloaded_paths = dl_manager.download(urls)
    input_urls = urls
    # Normalize every container type to parallel lists of paths and urls.
    for downloaded_paths in [downloaded_paths]:
        if isinstance(urls, str):
            downloaded_paths = [downloaded_paths]
            input_urls = [urls]
        elif isinstance(urls, dict):
            assert "train" in downloaded_paths.keys()
            downloaded_paths = downloaded_paths.values()
            input_urls = urls.values()
        assert downloaded_paths
        for downloaded_path, input_url in zip(downloaded_paths, input_urls):
            assert downloaded_path == dl_manager.downloaded_paths[input_url]
            downloaded_path = Path(downloaded_path)
            parts = downloaded_path.parts
            assert parts[-1] == HASH
            assert parts[-2] == cache_subdir
            assert downloaded_path.exists()
            content = downloaded_path.read_text()
            assert content == CONTENT
            metadata_downloaded_path = downloaded_path.with_suffix('''.json''')
            assert metadata_downloaded_path.exists()
            metadata_content = json.loads(metadata_downloaded_path.read_text())
            assert metadata_content == {"url": URL, "etag": None}


@pytest.mark.parametrize('''paths_type''', [str, list, dict])
def test_download_manager_extract(paths_type, xz_file, text_file):
    """Extract xz file(s) of each container type; verify paths and content."""
    filename = str(xz_file)
    if issubclass(paths_type, str):
        paths = filename
    elif issubclass(paths_type, list):
        paths = [filename]
    elif issubclass(paths_type, dict):
        paths = {'''train''': filename}
    dataset_name = '''dummy'''
    cache_dir = xz_file.parent
    extracted_subdir = '''extracted'''
    download_config = DownloadConfig(
        cache_dir=cache_dir,
        use_etag=False,
    )
    dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
    extracted_paths = dl_manager.extract(paths)
    input_paths = paths
    for extracted_paths in [extracted_paths]:
        if isinstance(paths, str):
            extracted_paths = [extracted_paths]
            input_paths = [paths]
        elif isinstance(paths, dict):
            assert "train" in extracted_paths.keys()
            extracted_paths = extracted_paths.values()
            input_paths = paths.values()
        assert extracted_paths
        for extracted_path, input_path in zip(extracted_paths, input_paths):
            assert extracted_path == dl_manager.extracted_paths[input_path]
            extracted_path = Path(extracted_path)
            parts = extracted_path.parts
            assert parts[-1] == hash_url_to_filename(input_path, etag=None)
            assert parts[-2] == extracted_subdir
            assert extracted_path.exists()
            extracted_file_content = extracted_path.read_text()
            expected_file_content = text_file.read_text()
            assert extracted_file_content == expected_file_content


def _test_jsonl(path, file):
    """Assert a jsonl archive member has the expected 4 rows with 3 columns."""
    assert path.endswith('''.jsonl''')
    for num_items, line in enumerate(file, start=1):
        item = json.loads(line.decode('''utf-8'''))
        assert item.keys() == {"col_1", "col_2", "col_3"}
    assert num_items == 4


@pytest.mark.parametrize('''archive_jsonl''', ['''tar_jsonl_path''', '''zip_jsonl_path'''])
def test_iter_archive_path(archive_jsonl, request):
    archive_jsonl_path = request.getfixturevalue(archive_jsonl)
    dl_manager = DownloadManager()
    for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(archive_jsonl_path), start=1):
        _test_jsonl(path, file)
    assert num_jsonl == 2


@pytest.mark.parametrize('''archive_nested_jsonl''', ['''tar_nested_jsonl_path''', '''zip_nested_jsonl_path'''])
def test_iter_archive_file(archive_nested_jsonl, request):
    nested_jsonl_path = request.getfixturevalue(archive_nested_jsonl)
    dl_manager = DownloadManager()
    for num_tar, (path, file) in enumerate(dl_manager.iter_archive(nested_jsonl_path), start=1):
        for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(file), start=1):
            _test_jsonl(subpath, subfile)
    assert num_tar == 1
    assert num_jsonl == 2


def test_iter_files(data_dir_with_hidden_files):
    dl_manager = DownloadManager()
    for num_file, file in enumerate(dl_manager.iter_files(data_dir_with_hidden_files), start=1):
        assert os.path.basename(file) == ("test.txt" if num_file == 1 else "train.txt")
    assert num_file == 2
288
1
"""Transformer-XL model configuration."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging

logger = logging.get_logger(__name__)

TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'transfo-xl-wt103': 'https://huggingface.co/transfo-xl-wt103/resolve/main/config.json',
}


class lowerCAmelCase__(PretrainedConfig):
    """Configuration for a Transformer-XL model (adaptive softmax, relative
    attention, segment-level recurrence via ``mems``)."""

    model_type = """transfo-xl"""
    keys_to_ignore_at_inference = ["""mems"""]
    attribute_map = {
        """n_token""": """vocab_size""",
        """hidden_size""": """d_model""",
        """num_attention_heads""": """n_head""",
        """num_hidden_layers""": """n_layer""",
    }

    def __init__(
        self,
        vocab_size=267735,
        cutoffs=[20000, 40000, 200000],  # noqa: B006 — kept for API compat; never mutated
        d_model=1024,
        d_embed=1024,
        n_head=16,
        d_head=64,
        d_inner=4096,
        div_val=4,
        pre_lnorm=False,
        n_layer=18,
        mem_len=1600,
        clamp_len=1000,
        same_length=True,
        proj_share_all_but_first=True,
        attn_type=0,
        sample_softmax=-1,
        adaptive=True,
        dropout=0.1,
        dropatt=0.0,
        untie_r=True,
        init="normal",
        init_range=0.0_1,
        proj_init_std=0.0_1,
        init_std=0.0_2,
        layer_norm_epsilon=1e-5,
        eos_token_id=0,
        **kwargs,
    ):
        """Build the config; see attribute names for parameter meanings.

        NOTE(review): parameter-to-attribute mapping reconstructed from the
        upstream TransfoXLConfig — confirm against the installed version.
        """
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        # First projection is never tied; the rest follow proj_share_all_but_first.
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs)
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs)
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        # Message copied from Transformer-XL documentation
        logger.info(f'''The model {self.model_type} is one of the few models that has no sequence length limit.''')
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            f'''The model {self.model_type} is one of the few models that has no sequence length limit.'''
        )
288
"""Extract a subset of BertForMaskedLM layers into a DistilBERT-shaped state
dict for transfer-learned distillation."""
import argparse

import torch

from transformers import BertForMaskedLM

if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description=(
            'Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned'
            ' Distillation'
        )
    )
    parser.add_argument('--model_type', default='bert', choices=['bert'])
    parser.add_argument('--model_name', default='bert-base-uncased', type=str)
    parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_bert-base-uncased_0247911.pth', type=str)
    parser.add_argument('--vocab_transform', action='store_true')
    args = parser.parse_args()

    if args.model_type == "bert":
        model = BertForMaskedLM.from_pretrained(args.model_name)
        prefix = 'bert'
    else:
        raise ValueError('args.model_type should be "bert".')

    state_dict = model.state_dict()
    compressed_sd = {}

    # NOTE(review): destination key names reconstructed from the upstream
    # extract_distilbert.py script — confirm against the student architecture.
    for w in ["word_embeddings", "position_embeddings"]:
        compressed_sd[f"distilbert.embeddings.{w}.weight"] = state_dict[f"{prefix}.embeddings.{w}.weight"]
    for w in ["weight", "bias"]:
        compressed_sd[f"distilbert.embeddings.LayerNorm.{w}"] = state_dict[f"{prefix}.embeddings.LayerNorm.{w}"]

    # Keep every other-ish teacher layer (0, 2, 4, 7, 9, 11) as student layers 0..5.
    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        for w in ["weight", "bias"]:
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.q_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.k_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.v_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.out_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.sa_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin1.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin2.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.output_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"
            ]
        std_idx += 1

    compressed_sd["vocab_projector.weight"] = state_dict['cls.predictions.decoder.weight']
    compressed_sd["vocab_projector.bias"] = state_dict['cls.predictions.bias']
    if args.vocab_transform:
        for w in ["weight", "bias"]:
            compressed_sd[f"vocab_transform.{w}"] = state_dict[f"cls.predictions.transform.dense.{w}"]
            compressed_sd[f"vocab_layer_norm.{w}"] = state_dict[f"cls.predictions.transform.LayerNorm.{w}"]

    print(f"N layers selected for distillation: {std_idx}")
    print(f"Number of params transferred for distillation: {len(compressed_sd.keys())}")
    print(f"Save transferred checkpoint to {args.dump_checkpoint}.")
    torch.save(compressed_sd, args.dump_checkpoint)
288
1
"""Deprecated SageMaker-specific Trainer shim; plain ``Trainer`` now covers it."""
import warnings

from ..trainer import Trainer
from ..utils import logging

logger = logging.get_logger(__name__)


class lowerCAmelCase__(Trainer):
    """Drop-in alias for :class:`~transformers.Trainer` kept only for backward
    compatibility; emits a deprecation warning on construction."""

    def __init__(self, args=None, **kwargs):
        # FutureWarning is the category (the original passed a wrong positional
        # argument here); the message text is unchanged.
        warnings.warn(
            '''`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` '''
            '''instead.''',
            FutureWarning,
        )
        super().__init__(args=args, **kwargs)
288
"""simple docstring""" def _UpperCAmelCase ( __lowerCamelCase : list , __lowerCamelCase : int = 0 ) -> list: _snake_case = length or len(__lowerCamelCase ) _snake_case = False for i in range(length - 1 ): if list_data[i] > list_data[i + 1]: _snake_case , _snake_case = list_data[i + 1], list_data[i] _snake_case = True return list_data if not swapped else bubble_sort(__lowerCamelCase , length - 1 ) if __name__ == "__main__": import doctest doctest.testmod()
288
1
"""simple docstring""" from __future__ import annotations import unittest import numpy as np from transformers import OPTConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel def _UpperCAmelCase ( __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : Optional[Any]=None ) -> Tuple: if attention_mask is None: _snake_case = tf.cast(tf.math.not_equal(__lowerCamelCase , config.pad_token_id ) , tf.inta ) return {"input_ids": input_ids, "attention_mask": attention_mask} @require_tf class lowerCAmelCase__ : __a = OPTConfig __a = {} __a = """gelu""" def __init__( self : Tuple , _lowerCamelCase : int , _lowerCamelCase : Union[str, Any]=13 , _lowerCamelCase : List[Any]=7 , _lowerCamelCase : Optional[Any]=True , _lowerCamelCase : Union[str, Any]=False , _lowerCamelCase : Optional[int]=99 , _lowerCamelCase : Any=16 , _lowerCamelCase : Tuple=2 , _lowerCamelCase : Tuple=4 , _lowerCamelCase : int=4 , _lowerCamelCase : str="gelu" , _lowerCamelCase : str=0.1 , _lowerCamelCase : List[str]=0.1 , _lowerCamelCase : List[str]=20 , _lowerCamelCase : Dict=2 , _lowerCamelCase : List[str]=1 , _lowerCamelCase : Tuple=0 , _lowerCamelCase : Optional[int]=16 , _lowerCamelCase : List[Any]=16 , ): _snake_case = parent _snake_case = batch_size _snake_case = seq_length _snake_case = is_training _snake_case = use_labels _snake_case = vocab_size _snake_case = hidden_size _snake_case = num_hidden_layers _snake_case = num_attention_heads _snake_case = intermediate_size _snake_case = hidden_act _snake_case = hidden_dropout_prob _snake_case = attention_probs_dropout_prob _snake_case = 
max_position_embeddings _snake_case = eos_token_id _snake_case = pad_token_id _snake_case = bos_token_id _snake_case = embed_dim _snake_case = word_embed_proj_dim _snake_case = False def lowercase ( self : Union[str, Any] ): _snake_case = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) _snake_case = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) _snake_case = tf.concat([input_ids, eos_tensor] , axis=1 ) _snake_case = self.config_cls( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , embed_dim=self.embed_dim , word_embed_proj_dim=self.word_embed_proj_dim , is_encoder_decoder=_lowerCamelCase , **self.config_updates , ) _snake_case = prepare_opt_inputs_dict(_lowerCamelCase , _lowerCamelCase ) return config, inputs_dict def lowercase ( self : Dict , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Any ): _snake_case = TFOPTModel(config=_lowerCamelCase ) _snake_case = inputs_dict['''input_ids'''] _snake_case = input_ids[:1, :] _snake_case = inputs_dict['''attention_mask'''][:1, :] _snake_case = 1 # first forward pass _snake_case = model(_lowerCamelCase , attention_mask=_lowerCamelCase , use_cache=_lowerCamelCase ) _snake_case , _snake_case = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids _snake_case = ids_tensor((self.batch_size, 3) , config.vocab_size ) _snake_case = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and _snake_case = tf.concat([input_ids, next_tokens] , axis=-1 ) _snake_case = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) _snake_case = 
model(_lowerCamelCase , attention_mask=_lowerCamelCase )[0] _snake_case = model(_lowerCamelCase , attention_mask=_lowerCamelCase , past_key_values=_lowerCamelCase )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice _snake_case = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) _snake_case = output_from_no_past[:, -3:, random_slice_idx] _snake_case = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(_lowerCamelCase , _lowerCamelCase , rtol=1e-3 ) @require_tf class lowerCAmelCase__ ( A_ , A_ , unittest.TestCase ): __a = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else () __a = (TFOPTForCausalLM,) if is_tf_available() else () __a = ( {"""feature-extraction""": TFOPTModel, """text-generation""": TFOPTForCausalLM} if is_tf_available() else {} ) __a = False __a = False __a = False __a = 10 def lowercase ( self : Dict ): _snake_case = TFOPTModelTester(self ) _snake_case = ConfigTester(self , config_class=_lowerCamelCase ) def lowercase ( self : List[str] ): self.config_tester.run_common_tests() def lowercase ( self : Optional[Any] ): _snake_case = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*_lowerCamelCase ) def lowercase ( self : Optional[Any] ): _snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common() def _get_word_embedding_weight(_lowerCamelCase : Optional[int] , _lowerCamelCase : Dict ): if hasattr(_lowerCamelCase , '''weight''' ): return embedding_layer.weight else: # Here we build the word embeddings weights if not exists. # And then we retry to get the attribute once built. 
model.build() if hasattr(_lowerCamelCase , '''weight''' ): return embedding_layer.weight else: return None for model_class in self.all_model_classes: for size in [config.vocab_size - 10, config.vocab_size + 10]: # build the embeddings _snake_case = model_class(config=_lowerCamelCase ) _snake_case = _get_word_embedding_weight(_lowerCamelCase , model.get_input_embeddings() ) _snake_case = _get_word_embedding_weight(_lowerCamelCase , model.get_output_embeddings() ) # reshape the embeddings model.resize_token_embeddings(_lowerCamelCase ) _snake_case = _get_word_embedding_weight(_lowerCamelCase , model.get_input_embeddings() ) _snake_case = _get_word_embedding_weight(_lowerCamelCase , model.get_output_embeddings() ) # check that the resized embeddings size matches the desired size. _snake_case = size if size is not None else config.vocab_size self.assertEqual(new_input_embeddings.shape[0] , _lowerCamelCase ) # check that weights remain the same after resizing _snake_case = True for pa, pa in zip(old_input_embeddings.value() , new_input_embeddings.value() ): if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0: _snake_case = False self.assertTrue(_lowerCamelCase ) if old_output_embeddings is not None and new_output_embeddings is not None: self.assertEqual(new_output_embeddings.shape[0] , _lowerCamelCase ) _snake_case = True for pa, pa in zip(old_output_embeddings.value() , new_output_embeddings.value() ): if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0: _snake_case = False self.assertTrue(_lowerCamelCase ) def _UpperCAmelCase ( __lowerCamelCase : List[Any] ) -> List[str]: return tf.constant(__lowerCamelCase , dtype=tf.intaa ) @require_tf class lowerCAmelCase__ ( unittest.TestCase ): __a = 99 def lowercase ( self : Tuple ): _snake_case = tf.ones((4, 1) , dtype=tf.intaa ) * 2 _snake_case = tf.concat([ids_tensor((4, 6) , self.vocab_size - 3 ) + 3, eos_column_vector] , axis=1 ) _snake_case = input_ids.shape[0] _snake_case = OPTConfig( vocab_size=self.vocab_size , 
hidden_size=24 , num_hidden_layers=2 , num_attention_heads=2 , ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ) return config, input_ids, batch_size @require_sentencepiece @require_tf class lowerCAmelCase__ ( unittest.TestCase ): @slow def lowercase ( self : Optional[Any] ): _snake_case = TFOPTModel.from_pretrained('''facebook/opt-350m''' ) _snake_case = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] ) _snake_case = tf.not_equal(_lowerCamelCase , model.config.pad_token_id ) with tf.GradientTape(): _snake_case = model(input_ids=_lowerCamelCase , attention_mask=_lowerCamelCase ).last_hidden_state _snake_case = (1, 11, 512) self.assertEqual(output.shape , _lowerCamelCase ) _snake_case = tf.constant( [[-0.2_8_7_3, -1.9_2_1_8, -0.3_0_3_3], [-1.2_7_1_0, -0.1_3_3_8, -0.1_9_0_2], [0.4_0_9_5, 0.1_2_1_4, -1.3_1_2_1]] ) self.assertTrue(np.allclose(output[:, :3, :3] , _lowerCamelCase , atol=4e-3 ) ) _snake_case = tf.function(_lowerCamelCase , jit_compile=_lowerCamelCase ) _snake_case = xla_generate(_lowerCamelCase , _lowerCamelCase )[0] self.assertTrue(np.allclose(output[:, :3, :3] , _lowerCamelCase , atol=4e-2 ) ) @require_tf @slow class lowerCAmelCase__ ( unittest.TestCase ): def lowercase ( self : Optional[Any] ): super().setUp() _snake_case = '''facebook/opt-350m''' def lowercase ( self : Optional[int] ): _snake_case = TFOPTForCausalLM.from_pretrained(self.path_model ) _snake_case = GPTaTokenizer.from_pretrained(self.path_model ) _snake_case = [ '''Today is a beautiful day and I want to''', '''In the city of''', '''Paris is the capital of France and''', '''Computers and mobile phones have taken''', ] # verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False _snake_case = tokenizer(_lowerCamelCase , return_tensors='''tf''' , padding=_lowerCamelCase , add_special_tokens=_lowerCamelCase ) _snake_case = tf.math.reduce_mean(model(inputs.input_ids , 
attention_mask=inputs.attention_mask )[0] , axis=-1 ) _snake_case = tf.constant( [ [1.3_8_5_1, -1_3.8_9_2_3, -1_0.5_2_2_9, -1_0.7_5_3_3, -0.2_3_0_9, -1_0.2_3_8_4, -0.5_3_6_5, -9.0_9_4_7, -5.1_6_7_0], [-4.7_0_7_3, -1_0.6_2_7_6, -3.9_4_1_5, -2_1.5_2_4_2, -0.2_8_2_2, -0.2_8_2_2, -0.2_8_2_2, -0.2_8_2_2, -0.2_8_2_2], [0.6_2_4_7, -3.4_2_2_9, -8.9_1_7_9, -1.4_2_9_7, -1_4.1_6_5_0, 1.4_1_4_6, -9.0_2_1_8, -0.2_7_0_3, -0.2_7_0_3], [6.4_7_8_3, -1.9_9_1_3, -1_0.7_9_2_6, -2.3_3_3_6, 1.5_0_9_2, -0.9_9_7_4, -6.8_2_1_3, 1.3_4_7_7, 1.3_4_7_7], ] ) self.assertTrue(np.allclose(_lowerCamelCase , _lowerCamelCase , atol=1e-4 ) ) _snake_case = tf.function(_lowerCamelCase , jit_compile=_lowerCamelCase ) _snake_case = tf.math.reduce_mean(xla_generate(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 ) self.assertTrue(np.allclose(_lowerCamelCase , _lowerCamelCase , atol=1e-4 ) ) @require_tf @slow class lowerCAmelCase__ ( unittest.TestCase ): @property def lowercase ( self : List[str] ): return [ "Today is a beautiful day and I want", "In the city of", "Paris is the capital of France and", "Computers and mobile phones have taken", ] def lowercase ( self : Union[str, Any] ): _snake_case = '''facebook/opt-125m''' _snake_case = [ '''Today is a beautiful day and I want to''', '''In the city of New York, the city''', '''Paris is the capital of France and the capital''', '''Computers and mobile phones have taken over the''', ] _snake_case = [] _snake_case = GPTaTokenizer.from_pretrained(_lowerCamelCase ) _snake_case = TFOPTForCausalLM.from_pretrained(_lowerCamelCase ) for prompt in self.prompts: _snake_case = tokenizer(_lowerCamelCase , return_tensors='''tf''' ).input_ids _snake_case = model.generate(_lowerCamelCase , max_length=10 ) _snake_case = tokenizer.batch_decode(_lowerCamelCase , skip_special_tokens=_lowerCamelCase ) predicted_outputs += generated_string self.assertListEqual(_lowerCamelCase , _lowerCamelCase ) def lowercase ( self : str ): _snake_case = 
'''facebook/opt-350m''' _snake_case = GPTaTokenizer.from_pretrained(_lowerCamelCase ) _snake_case = TFOPTForCausalLM.from_pretrained(_lowerCamelCase ) _snake_case = '''left''' # use different length sentences to test batching _snake_case = [ '''Hello, my dog is a little''', '''Today, I''', ] _snake_case = tokenizer(_lowerCamelCase , return_tensors='''tf''' , padding=_lowerCamelCase ) _snake_case = inputs['''input_ids'''] _snake_case = model.generate(input_ids=_lowerCamelCase , attention_mask=inputs['''attention_mask'''] ) _snake_case = tokenizer(sentences[0] , return_tensors='''tf''' ).input_ids _snake_case = model.generate(input_ids=_lowerCamelCase ) _snake_case = inputs_non_padded.shape[-1] - tf.math.reduce_sum( tf.cast(inputs['''attention_mask'''][-1] , tf.intaa ) ) _snake_case = tokenizer(sentences[1] , return_tensors='''tf''' ).input_ids _snake_case = model.generate(input_ids=_lowerCamelCase , max_length=model.config.max_length - num_paddings ) _snake_case = tokenizer.batch_decode(_lowerCamelCase , skip_special_tokens=_lowerCamelCase ) _snake_case = tokenizer.decode(output_non_padded[0] , skip_special_tokens=_lowerCamelCase ) _snake_case = tokenizer.decode(output_padded[0] , skip_special_tokens=_lowerCamelCase ) _snake_case = [ '''Hello, my dog is a little bit of a dork.\nI\'m a little bit''', '''Today, I was in the middle of a conversation with a friend about the''', ] self.assertListEqual(_lowerCamelCase , _lowerCamelCase ) self.assertListEqual(_lowerCamelCase , [non_padded_sentence, padded_sentence] ) def lowercase ( self : Any ): _snake_case = '''facebook/opt-350m''' _snake_case = [ '''Today is a beautiful day and I want to''', '''In the city of San Francisco, the city''', '''Paris is the capital of France and the capital''', '''Computers and mobile phones have taken over the''', ] _snake_case = [] _snake_case = GPTaTokenizer.from_pretrained(_lowerCamelCase ) _snake_case = TFOPTForCausalLM.from_pretrained(_lowerCamelCase ) for prompt in self.prompts: 
_snake_case = tokenizer(_lowerCamelCase , return_tensors='''tf''' ).input_ids _snake_case = model.generate(_lowerCamelCase , max_length=10 ) _snake_case = tokenizer.batch_decode(_lowerCamelCase , skip_special_tokens=_lowerCamelCase ) predicted_outputs += generated_string self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
288
"""simple docstring""" import argparse import torch from transformers import ( SpeechTaConfig, SpeechTaFeatureExtractor, SpeechTaForSpeechToSpeech, SpeechTaForSpeechToText, SpeechTaForTextToSpeech, SpeechTaProcessor, SpeechTaTokenizer, logging, ) from transformers.tokenization_utils import AddedToken logging.set_verbosity_info() UpperCAmelCase__ = logging.get_logger('transformers.models.speecht5') UpperCAmelCase__ = { 'speech_encoder_prenet.layer_norm': 'speecht5.encoder.prenet.feature_projection.layer_norm', 'speech_encoder_prenet.post_extract_proj': 'speecht5.encoder.prenet.feature_projection.projection', 'speech_encoder_prenet.pos_conv.0': 'speecht5.encoder.prenet.pos_conv_embed.conv', 'speech_encoder_prenet.mask_emb': 'speecht5.encoder.prenet.masked_spec_embed', } UpperCAmelCase__ = { 'text_encoder_prenet.encoder_prenet.0': 'speecht5.encoder.prenet.embed_tokens', 'text_encoder_prenet.encoder_prenet.1.alpha': 'speecht5.encoder.prenet.encode_positions.alpha', } UpperCAmelCase__ = { 'speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0': 'speecht5.decoder.prenet.layers.0', 'speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0': 'speecht5.decoder.prenet.layers.1', 'speech_decoder_prenet.decoder_prenet.0.1': 'speecht5.decoder.prenet.final_layer', 'speech_decoder_prenet.decoder_prenet.1.alpha': 'speecht5.decoder.prenet.encode_positions.alpha', 'speech_decoder_prenet.spkembs_layer.0': 'speecht5.decoder.prenet.speaker_embeds_layer', } UpperCAmelCase__ = { 'speech_decoder_postnet.feat_out': 'speech_decoder_postnet.feat_out', 'speech_decoder_postnet.prob_out': 'speech_decoder_postnet.prob_out', 'speech_decoder_postnet.postnet.postnet.0.0': 'speech_decoder_postnet.layers.0.conv', 'speech_decoder_postnet.postnet.postnet.0.1': 'speech_decoder_postnet.layers.0.batch_norm', 'speech_decoder_postnet.postnet.postnet.1.0': 'speech_decoder_postnet.layers.1.conv', 'speech_decoder_postnet.postnet.postnet.1.1': 'speech_decoder_postnet.layers.1.batch_norm', 
'speech_decoder_postnet.postnet.postnet.2.0': 'speech_decoder_postnet.layers.2.conv', 'speech_decoder_postnet.postnet.postnet.2.1': 'speech_decoder_postnet.layers.2.batch_norm', 'speech_decoder_postnet.postnet.postnet.3.0': 'speech_decoder_postnet.layers.3.conv', 'speech_decoder_postnet.postnet.postnet.3.1': 'speech_decoder_postnet.layers.3.batch_norm', 'speech_decoder_postnet.postnet.postnet.4.0': 'speech_decoder_postnet.layers.4.conv', 'speech_decoder_postnet.postnet.postnet.4.1': 'speech_decoder_postnet.layers.4.batch_norm', } UpperCAmelCase__ = { 'text_decoder_prenet.embed_tokens': 'speecht5.decoder.prenet.embed_tokens', } UpperCAmelCase__ = { 'text_decoder_postnet.output_projection': 'text_decoder_postnet.lm_head', } UpperCAmelCase__ = { 'encoder.layers.*.self_attn.k_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj', 'encoder.layers.*.self_attn.v_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj', 'encoder.layers.*.self_attn.q_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj', 'encoder.layers.*.self_attn.out_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj', 'encoder.layers.*.self_attn_layer_norm': 'speecht5.encoder.wrapped_encoder.layers.*.layer_norm', 'encoder.layers.*.fc1': 'speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense', 'encoder.layers.*.fc2': 'speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense', 'encoder.layers.*.final_layer_norm': 'speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm', 'encoder.layer_norm': 'speecht5.encoder.wrapped_encoder.layer_norm', 'encoder.pos_emb.pe_k': 'speecht5.encoder.wrapped_encoder.embed_positions.pe_k', } UpperCAmelCase__ = { 'decoder.layers.*.self_attn.k_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj', 'decoder.layers.*.self_attn.v_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj', 'decoder.layers.*.self_attn.q_proj': 
'speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj', 'decoder.layers.*.self_attn.out_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj', 'decoder.layers.*.self_attn_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm', 'decoder.layers.*.encoder_attn.k_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj', 'decoder.layers.*.encoder_attn.v_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj', 'decoder.layers.*.encoder_attn.q_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj', 'decoder.layers.*.encoder_attn.out_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj', 'decoder.layers.*.encoder_attn_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm', 'decoder.layers.*.fc1': 'speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense', 'decoder.layers.*.fc2': 'speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense', 'decoder.layers.*.final_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm', } UpperCAmelCase__ = { **MAPPING_SPEECH_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_TEXT_DECODER_PRENET, **MAPPING_TEXT_DECODER_POSTNET, } UpperCAmelCase__ = { **MAPPING_TEXT_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_SPEECH_DECODER_PRENET, **MAPPING_SPEECH_DECODER_POSTNET, } UpperCAmelCase__ = { **MAPPING_SPEECH_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_SPEECH_DECODER_PRENET, **MAPPING_SPEECH_DECODER_POSTNET, } UpperCAmelCase__ = [] UpperCAmelCase__ = [ 'encoder.version', 'encoder.layers.*.norm_k.weight', 'encoder.layers.*.norm_k.bias', 'decoder.version', 'decoder.layers.*.norm_k.weight', 'decoder.layers.*.norm_k.bias', 'decoder.pos_emb.pe_k', 'speech_encoder_prenet.embed_positions._float_tensor', 'text_decoder_prenet.embed_positions._float_tensor', ] UpperCAmelCase__ = IGNORE_KEYS + [ 'encoder.proj', 
'text_encoder_prenet.*', 'speech_decoder_prenet.*', 'speech_decoder_postnet.*', ] UpperCAmelCase__ = IGNORE_KEYS + [ 'encoder.proj', 'speech_encoder_prenet.*', 'text_decoder_prenet.*', 'text_decoder_postnet.*', ] UpperCAmelCase__ = IGNORE_KEYS + [ 'encoder.proj', 'text_encoder_prenet.*', 'text_decoder_prenet.*', 'text_decoder_postnet.*', ] def _UpperCAmelCase ( __lowerCamelCase : Dict , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : int , __lowerCamelCase : Dict ) -> List[Any]: for attribute in key.split('''.''' ): _snake_case = getattr(__lowerCamelCase , __lowerCamelCase ) if weight_type is not None: _snake_case = getattr(__lowerCamelCase , __lowerCamelCase ).shape else: _snake_case = hf_pointer.shape if hf_shape != value.shape: raise ValueError( f'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be''' f''' {value.shape} for {full_name}''' ) if weight_type == "weight": _snake_case = value elif weight_type == "weight_g": _snake_case = value elif weight_type == "weight_v": _snake_case = value elif weight_type == "bias": _snake_case = value elif weight_type == "running_mean": _snake_case = value elif weight_type == "running_var": _snake_case = value elif weight_type == "num_batches_tracked": _snake_case = value else: _snake_case = value logger.info(f'''{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.''' ) def _UpperCAmelCase ( __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[int] ) -> List[str]: for key in ignore_keys: if key.endswith('''.*''' ): if name.startswith(key[:-1] ): return True elif ".*." 
in key: _snake_case , _snake_case = key.split('''.*.''' ) if prefix in name and suffix in name: return True elif key in name: return True return False def _UpperCAmelCase ( __lowerCamelCase : Dict , __lowerCamelCase : List[Any] , __lowerCamelCase : Tuple ) -> Optional[Any]: _snake_case = [] if task == "s2t": _snake_case = hf_model.speechta.encoder.prenet.feature_encoder _snake_case = MAPPING_S2T _snake_case = IGNORE_KEYS_S2T elif task == "t2s": _snake_case = None _snake_case = MAPPING_T2S _snake_case = IGNORE_KEYS_T2S elif task == "s2s": _snake_case = hf_model.speechta.encoder.prenet.feature_encoder _snake_case = MAPPING_S2S _snake_case = IGNORE_KEYS_S2S else: raise ValueError(f'''Unsupported task: {task}''' ) for name, value in fairseq_dict.items(): if should_ignore(__lowerCamelCase , __lowerCamelCase ): logger.info(f'''{name} was ignored''' ) continue _snake_case = False if "conv_layers" in name: load_conv_layer( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , hf_model.config.feat_extract_norm == '''group''' , ) _snake_case = True else: for key, mapped_key in MAPPING.items(): # mapped_key = "speecht5." 
+ mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if "*" in key: _snake_case , _snake_case = key.split('''.*.''' ) if prefix in name and suffix in name: _snake_case = suffix # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]: if key in name: _snake_case = True if "*" in mapped_key: _snake_case = name.split(__lowerCamelCase )[0].split('''.''' )[-2] _snake_case = mapped_key.replace('''*''' , __lowerCamelCase ) if "weight_g" in name: _snake_case = '''weight_g''' elif "weight_v" in name: _snake_case = '''weight_v''' elif "bias" in name: _snake_case = '''bias''' elif "weight" in name: _snake_case = '''weight''' elif "running_mean" in name: _snake_case = '''running_mean''' elif "running_var" in name: _snake_case = '''running_var''' elif "num_batches_tracked" in name: _snake_case = '''num_batches_tracked''' else: _snake_case = None set_recursively(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) continue if not is_used: unused_weights.append(__lowerCamelCase ) logger.warning(f'''Unused weights: {unused_weights}''' ) def _UpperCAmelCase ( __lowerCamelCase : Any , __lowerCamelCase : Optional[int] , __lowerCamelCase : Tuple , __lowerCamelCase : List[Any] , __lowerCamelCase : Tuple ) -> List[Any]: _snake_case = full_name.split('''conv_layers.''' )[-1] _snake_case = name.split('''.''' ) _snake_case = int(items[0] ) _snake_case = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) _snake_case = value logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' 
{feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) _snake_case = value logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' ) _snake_case = value logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' ) _snake_case = value logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(__lowerCamelCase ) @torch.no_grad() def _UpperCAmelCase ( __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Dict , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : int=None , __lowerCamelCase : Union[str, Any]=None , ) -> Dict: if config_path is not None: _snake_case = SpeechTaConfig.from_pretrained(__lowerCamelCase ) else: _snake_case = SpeechTaConfig() if task == "s2t": _snake_case = config.max_text_positions _snake_case = SpeechTaForSpeechToText(__lowerCamelCase ) elif task == "t2s": _snake_case = 18_76 _snake_case = 6_00 _snake_case = config.max_speech_positions _snake_case = SpeechTaForTextToSpeech(__lowerCamelCase ) elif task == "s2s": _snake_case = 18_76 _snake_case = config.max_speech_positions _snake_case = SpeechTaForSpeechToSpeech(__lowerCamelCase ) else: raise ValueError(f'''Unknown task name: {task}''' ) if vocab_path: 
_snake_case = SpeechTaTokenizer(__lowerCamelCase , model_max_length=config.max_text_positions ) # Mask token behaves like a normal word, i.e. include the space before it _snake_case = AddedToken('''<mask>''' , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) _snake_case = mask_token tokenizer.add_special_tokens({'''mask_token''': mask_token} ) tokenizer.add_tokens(['''<ctc_blank>'''] ) _snake_case = SpeechTaFeatureExtractor() _snake_case = SpeechTaProcessor(tokenizer=__lowerCamelCase , feature_extractor=__lowerCamelCase ) processor.save_pretrained(__lowerCamelCase ) _snake_case = torch.load(__lowerCamelCase ) recursively_load_weights(fairseq_checkpoint['''model'''] , __lowerCamelCase , __lowerCamelCase ) model.save_pretrained(__lowerCamelCase ) if repo_id: print('''Pushing to the hub...''' ) processor.push_to_hub(__lowerCamelCase ) model.push_to_hub(__lowerCamelCase ) if __name__ == "__main__": UpperCAmelCase__ = argparse.ArgumentParser() parser.add_argument( '--task', default='s2t', type=str, help='Type of the SpeechT5 model you\'d like to convert. Should be one of \'s2t\', \'t2s\', \'s2s\'.', ) parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to fairseq checkpoint') parser.add_argument('--vocab_path', default=None, type=str, help='Path to SentencePiece model') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') parser.add_argument( '--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.' ) parser.add_argument( '--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.' ) UpperCAmelCase__ = parser.parse_args() convert_speechta_checkpoint( args.task, args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.vocab_path, args.push_to_hub, )
288
1
"""Matthews correlation coefficient metric for the `datasets` metric API."""

from sklearn.metrics import matthews_corrcoef

import datasets


# These three constants were previously all bound to one reused name, which left
# `_DESCRIPTION`/`_KWARGS_DESCRIPTION`/`_CITATION` undefined at their use sites
# (the decorator and `_info` below). String contents are unchanged.
_DESCRIPTION = '\nCompute the Matthews correlation coefficient (MCC)\n\nThe Matthews correlation coefficient is used in machine learning as a\nmeasure of the quality of binary and multiclass classifications. It takes\ninto account true and false positives and negatives and is generally\nregarded as a balanced measure which can be used even if the classes are of\nvery different sizes. The MCC is in essence a correlation coefficient value\nbetween -1 and +1. A coefficient of +1 represents a perfect prediction, 0\nan average random prediction and -1 an inverse prediction. The statistic\nis also known as the phi coefficient. [source: Wikipedia]\n'

_KWARGS_DESCRIPTION = '\nArgs:\n    predictions (list of int): Predicted labels, as returned by a model.\n    references (list of int): Ground truth labels.\n    sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.\nReturns:\n    matthews_correlation (dict containing float): Matthews correlation.\nExamples:\n    Example 1, a basic example with only predictions and references as inputs:\n        >>> matthews_metric = datasets.load_metric("matthews_correlation")\n        >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n        ...                                     predictions=[1, 2, 2, 0, 3, 3])\n        >>> print(round(results[\'matthews_correlation\'], 2))\n        0.54\n\n    Example 2, the same example as above, but also including sample weights:\n        >>> matthews_metric = datasets.load_metric("matthews_correlation")\n        >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n        ...                                     predictions=[1, 2, 2, 0, 3, 3],\n        ...                                     sample_weight=[0.5, 3, 1, 1, 1, 2])\n        >>> print(round(results[\'matthews_correlation\'], 2))\n        0.1\n\n    Example 3, the same example as above, but with sample weights that cause a negative correlation:\n        >>> matthews_metric = datasets.load_metric("matthews_correlation")\n        >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n        ...                                     predictions=[1, 2, 2, 0, 3, 3],\n        ...                                     sample_weight=[0.5, 1, 0, 0, 0, 1])\n        >>> print(round(results[\'matthews_correlation\'], 2))\n        -0.25\n'

_CITATION = '\\n@article{scikit-learn,\n  title={Scikit-learn: Machine Learning in {P}ython},\n  author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n         and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n         and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n         Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n  journal={Journal of Machine Learning Research},\n  volume={12},\n  pages={2825--2830},\n  year={2011}\n}\n'


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class MatthewsCorrelation(datasets.Metric):
    """Thin `datasets.Metric` wrapper around `sklearn.metrics.matthews_corrcoef`."""

    def _info(self):
        # `datasets.Metric` dispatches on `_info`/`_compute`; the previous
        # `lowercase` method names were never called by the framework.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=[
                "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html"
            ],
        )

    def _compute(self, predictions, references, sample_weight=None):
        """Return {"matthews_correlation": float} for the given label lists.

        Args:
            predictions: predicted labels, as returned by a model.
            references: ground-truth labels.
            sample_weight: optional per-sample weights forwarded to sklearn.
        """
        return {
            "matthews_correlation": float(matthews_corrcoef(references, predictions, sample_weight=sample_weight)),
        }
288
"""simple docstring""" import argparse import os import re import torch from flax.traverse_util import flatten_dict from tax import checkpoints from transformers import ( AutoTokenizer, PixaStructConfig, PixaStructForConditionalGeneration, PixaStructImageProcessor, PixaStructProcessor, PixaStructTextConfig, PixaStructVisionConfig, ) def _UpperCAmelCase ( __lowerCamelCase : Tuple ) -> Optional[int]: _snake_case = checkpoints.load_tax_checkpoint(__lowerCamelCase ) _snake_case = flatten_dict(__lowerCamelCase ) return flax_params def _UpperCAmelCase ( __lowerCamelCase : Dict ) -> Optional[int]: _snake_case = {} _snake_case = { '''token_embedder''': '''embeddings''', '''encoder_norm''': '''layernorm''', '''kernel''': '''weight''', '''.out''': '''.output''', '''scale''': '''weight''', '''embedders_0.pos_embedding''': '''row_embedder.weight''', '''embedders_1.pos_embedding''': '''column_embedder.weight''', } _snake_case = { '''query''': '''attention.query''', '''key''': '''attention.key''', '''value''': '''attention.value''', '''output.dense''': '''output''', '''encoder_decoder_attention.o''': '''encoder_decoder_attention.attention.o''', '''pre_self_attention_layer_norm''': '''self_attention.layer_norm''', '''pre_cross_attention_layer_norm''': '''encoder_decoder_attention.layer_norm''', '''mlp.''': '''mlp.DenseReluDense.''', '''pre_mlp_layer_norm''': '''mlp.layer_norm''', '''self_attention.o''': '''self_attention.attention.o''', '''decoder.embeddings.embedding''': '''decoder.embed_tokens.weight''', '''decoder.relpos_bias.rel_embedding''': '''decoder.layer.0.self_attention.attention.relative_attention_bias.weight''', '''decoder.decoder_norm.weight''': '''decoder.final_layer_norm.weight''', '''decoder.logits_dense.weight''': '''decoder.lm_head.weight''', } for key in flax_dict.keys(): if "target" in key: # remove the first prefix from the key _snake_case = '''.'''.join(key[1:] ) # rename the key for old, new in CONVERSION_MAPPING.items(): _snake_case = 
new_key.replace(__lowerCamelCase , __lowerCamelCase ) if "decoder" in new_key: for old, new in DECODER_CONVERSION_MAPPING.items(): _snake_case = new_key.replace(__lowerCamelCase , __lowerCamelCase ) if "layers" in new_key and "decoder" not in new_key: # use regex to replace the layer number _snake_case = re.sub(R'''layers_(\d+)''' , R'''layer.\1''' , __lowerCamelCase ) _snake_case = new_key.replace('''encoder''' , '''encoder.encoder''' ) elif "layers" in new_key and "decoder" in new_key: # use regex to replace the layer number _snake_case = re.sub(R'''layers_(\d+)''' , R'''layer.\1''' , __lowerCamelCase ) _snake_case = flax_dict[key] _snake_case = {} # convert converted_dict into torch format for key in converted_dict.keys(): if ("embed_tokens" not in key) and ("embedder" not in key): _snake_case = torch.from_numpy(converted_dict[key].T ) else: _snake_case = torch.from_numpy(converted_dict[key] ) return converted_torch_dict def _UpperCAmelCase ( __lowerCamelCase : List[str] , __lowerCamelCase : Dict , __lowerCamelCase : Any=False , __lowerCamelCase : Optional[int]=False ) -> int: _snake_case = get_flax_param(__lowerCamelCase ) if not use_large: _snake_case = PixaStructVisionConfig() _snake_case = PixaStructTextConfig() else: _snake_case = PixaStructVisionConfig( hidden_size=15_36 , d_ff=39_68 , num_attention_heads=24 , num_hidden_layers=18 ) _snake_case = PixaStructTextConfig(hidden_size=15_36 , d_ff=39_68 , num_heads=24 , num_layers=18 ) _snake_case = PixaStructConfig( vision_config=encoder_config.to_dict() , text_config=decoder_config.to_dict() , is_vqa=__lowerCamelCase ) _snake_case = PixaStructForConditionalGeneration(__lowerCamelCase ) _snake_case = rename_and_convert_flax_params(__lowerCamelCase ) model.load_state_dict(__lowerCamelCase ) _snake_case = AutoTokenizer.from_pretrained('''ybelkada/test-pix2struct-tokenizer''' ) _snake_case = PixaStructImageProcessor() _snake_case = PixaStructProcessor(image_processor=__lowerCamelCase , tokenizer=__lowerCamelCase ) 
if use_large: _snake_case = 40_96 _snake_case = True # mkdir if needed os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase ) model.save_pretrained(__lowerCamelCase ) processor.save_pretrained(__lowerCamelCase ) print('''Model saved in {}'''.format(__lowerCamelCase ) ) if __name__ == "__main__": UpperCAmelCase__ = argparse.ArgumentParser() parser.add_argument('--t5x_checkpoint_path', default=None, type=str, help='Path to the original T5x checkpoint.') parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--use_large', action='store_true', help='Use large model.') parser.add_argument('--is_vqa', action='store_true', help='Use large model.') UpperCAmelCase__ = parser.parse_args() convert_pixastruct_original_pytorch_checkpoint_to_hf( args.tax_checkpoint_path, args.pytorch_dump_folder_path, args.use_large )
288
1
"""Tests for TvltProcessor, which composes a TvltImageProcessor and a TvltFeatureExtractor."""

import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch


if is_vision_available():
    from transformers import TvltImageProcessor

if is_speech_available():
    from transformers import TvltFeatureExtractor

from transformers import TvltProcessor


@require_torch
class TvltProcessorTest(unittest.TestCase):
    # NOTE: every method here was previously named `lowercase`, so later
    # definitions shadowed earlier ones and only a single test survived
    # collection. Distinct `test_*` names restore the full suite.

    def setUp(self):
        # Shared checkpoint for both sub-processors and a scratch dir for
        # the save/load round-trip test.
        self.checkpoint = "ZinengTang/tvlt-base"
        self.tmpdirname = tempfile.mkdtemp()

    def get_image_processor(self, **kwargs):
        return TvltImageProcessor.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return TvltFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        # A processor round-tripped through save_pretrained/from_pretrained
        # must keep its sub-processor types.
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        processor.save_pretrained(self.tmpdirname)
        processor = TvltProcessor.from_pretrained(self.tmpdirname)

        self.assertIsInstance(processor.feature_extractor, TvltFeatureExtractor)
        self.assertIsInstance(processor.image_processor, TvltImageProcessor)

    def test_feature_extractor(self):
        # Audio-only inputs through the processor must match the bare
        # feature extractor's output.
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        audio = np.ones([12000])

        audio_dict = feature_extractor(audio, return_tensors="np")
        input_processor = processor(audio=audio, return_tensors="np")

        for key in audio_dict.keys():
            self.assertAlmostEqual(audio_dict[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_image_processor(self):
        # Image-only inputs through the processor must match the bare
        # image processor's output.
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        images = np.ones([3, 224, 224])

        image_dict = image_processor(images, return_tensors="np")
        input_processor = processor(images=images, return_tensors="np")

        for key in image_dict.keys():
            self.assertAlmostEqual(image_dict[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        audio = np.ones([12000])
        images = np.ones([3, 224, 224])

        inputs = processor(audio=audio, images=images)

        self.assertListEqual(list(inputs.keys()), ["audio_values", "audio_mask", "pixel_values", "pixel_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_model_input_names(self):
        # The composed processor advertises the union of its sub-processors'
        # input names, in image-then-audio order.
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        self.assertListEqual(
            processor.model_input_names,
            image_processor.model_input_names + feature_extractor.model_input_names,
            msg="`processor` and `image_processor`+`feature_extractor` model input names do not match",
        )
288
"""simple docstring""" from __future__ import annotations from bisect import bisect_left from functools import total_ordering from heapq import merge @total_ordering class lowerCAmelCase__ ( A_ ): def __lt__( self : Any , _lowerCamelCase : int ): return self[-1] < other[-1] def __eq__( self : int , _lowerCamelCase : Optional[Any] ): return self[-1] == other[-1] def _UpperCAmelCase ( __lowerCamelCase : list ) -> list: _snake_case = [] # sort into stacks for element in collection: _snake_case = Stack([element] ) _snake_case = bisect_left(__lowerCamelCase , __lowerCamelCase ) if i != len(__lowerCamelCase ): stacks[i].append(__lowerCamelCase ) else: stacks.append(__lowerCamelCase ) # use a heap-based merge to merge stack efficiently _snake_case = merge(*(reversed(__lowerCamelCase ) for stack in stacks) ) return collection if __name__ == "__main__": UpperCAmelCase__ = input('Enter numbers separated by a comma:\n').strip() UpperCAmelCase__ = [int(item) for item in user_input.split(',')] print(patience_sort(unsorted))
288
1
"""simple docstring""" def _UpperCAmelCase ( __lowerCamelCase : int = 1_00_00_00 ) -> int: _snake_case = limit + 1 _snake_case = [0] * limit for first_term in range(1 , __lowerCamelCase ): for n in range(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ): _snake_case = first_term + n / first_term if common_difference % 4: # d must be divisble by 4 continue else: common_difference /= 4 if ( first_term > common_difference and first_term < 4 * common_difference ): # since x,y,z are positive integers frequency[n] += 1 # so z>0 and a>d ,also 4d<a _snake_case = sum(1 for x in frequency[1:limit] if x == 10 ) return count if __name__ == "__main__": print(F"{solution() = }")
288
"""simple docstring""" UpperCAmelCase__ = { 'Pillow': 'Pillow', 'accelerate': 'accelerate>=0.11.0', 'compel': 'compel==0.1.8', 'black': 'black~=23.1', 'datasets': 'datasets', 'filelock': 'filelock', 'flax': 'flax>=0.4.1', 'hf-doc-builder': 'hf-doc-builder>=0.3.0', 'huggingface-hub': 'huggingface-hub>=0.13.2', 'requests-mock': 'requests-mock==1.10.0', 'importlib_metadata': 'importlib_metadata', 'invisible-watermark': 'invisible-watermark', 'isort': 'isort>=5.5.4', 'jax': 'jax>=0.2.8,!=0.3.2', 'jaxlib': 'jaxlib>=0.1.65', 'Jinja2': 'Jinja2', 'k-diffusion': 'k-diffusion>=0.0.12', 'torchsde': 'torchsde', 'note_seq': 'note_seq', 'librosa': 'librosa', 'numpy': 'numpy', 'omegaconf': 'omegaconf', 'parameterized': 'parameterized', 'protobuf': 'protobuf>=3.20.3,<4', 'pytest': 'pytest', 'pytest-timeout': 'pytest-timeout', 'pytest-xdist': 'pytest-xdist', 'ruff': 'ruff>=0.0.241', 'safetensors': 'safetensors', 'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92', 'scipy': 'scipy', 'onnx': 'onnx', 'regex': 'regex!=2019.12.17', 'requests': 'requests', 'tensorboard': 'tensorboard', 'torch': 'torch>=1.4', 'torchvision': 'torchvision', 'transformers': 'transformers>=4.25.1', 'urllib3': 'urllib3<=2.0.0', }
288
1
"""simple docstring""" import unittest from transformers import PegasusTokenizer, PegasusTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin UpperCAmelCase__ = get_tests_dir('fixtures/test_sentencepiece_no_bos.model') @require_sentencepiece @require_tokenizers class lowerCAmelCase__ ( A_ , unittest.TestCase ): __a = PegasusTokenizer __a = PegasusTokenizerFast __a = True __a = True def lowercase ( self : str ): super().setUp() # We have a SentencePiece fixture for testing _snake_case = PegasusTokenizer(_lowerCamelCase ) tokenizer.save_pretrained(self.tmpdirname ) @cached_property def lowercase ( self : Any ): return PegasusTokenizer.from_pretrained('''google/pegasus-large''' ) def lowercase ( self : Dict , **_lowerCamelCase : Dict ): return PegasusTokenizer.from_pretrained(self.tmpdirname , **_lowerCamelCase ) def lowercase ( self : Dict , _lowerCamelCase : List[Any] ): return ("This is a test", "This is a test") def lowercase ( self : str ): _snake_case = '''</s>''' _snake_case = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowerCamelCase ) , _lowerCamelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowerCamelCase ) , _lowerCamelCase ) def lowercase ( self : Optional[Any] ): _snake_case = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<pad>''' ) self.assertEqual(vocab_keys[1] , '''</s>''' ) self.assertEqual(vocab_keys[-1] , '''v''' ) self.assertEqual(len(_lowerCamelCase ) , 1103 ) def lowercase ( self : Optional[int] ): self.assertEqual(self.get_tokenizer().vocab_size , 1103 ) def lowercase ( self : Any ): _snake_case = self.rust_tokenizer_class.from_pretrained(self.tmpdirname ) _snake_case = self.tokenizer_class.from_pretrained(self.tmpdirname ) _snake_case = ( '''Let\'s see which <unk> is the better 
<unk_token_11> one <mask_1> It seems like this <mask_2> was important''' ''' </s> <pad> <pad> <pad>''' ) _snake_case = rust_tokenizer([raw_input_str] , return_tensors=_lowerCamelCase , add_special_tokens=_lowerCamelCase ).input_ids[0] _snake_case = py_tokenizer([raw_input_str] , return_tensors=_lowerCamelCase , add_special_tokens=_lowerCamelCase ).input_ids[0] self.assertListEqual(_lowerCamelCase , _lowerCamelCase ) def lowercase ( self : Tuple ): _snake_case = self._large_tokenizer # <mask_1> masks whole sentence while <mask_2> masks single word _snake_case = '''<mask_1> To ensure a <mask_2> flow of bank resolutions.''' _snake_case = [2, 413, 615, 114, 3, 1971, 113, 1679, 10710, 107, 1] _snake_case = tokenizer([raw_input_str] , return_tensors=_lowerCamelCase ).input_ids[0] self.assertListEqual(_lowerCamelCase , _lowerCamelCase ) def lowercase ( self : Union[str, Any] ): _snake_case = self._large_tokenizer # The tracebacks for the following asserts are **better** without messages or self.assertEqual assert tokenizer.vocab_size == 96103 assert tokenizer.pad_token_id == 0 assert tokenizer.eos_token_id == 1 assert tokenizer.offset == 103 assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105 assert tokenizer.unk_token == "<unk>" assert tokenizer.model_max_length == 1024 _snake_case = '''To ensure a smooth flow of bank resolutions.''' _snake_case = [413, 615, 114, 2291, 1971, 113, 1679, 10710, 107, 1] _snake_case = tokenizer([raw_input_str] , return_tensors=_lowerCamelCase ).input_ids[0] self.assertListEqual(_lowerCamelCase , _lowerCamelCase ) assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"] @require_torch def lowercase ( self : int ): _snake_case = ['''This is going to be way too long.''' * 150, '''short example'''] _snake_case = ['''not super long but more than 5 tokens''', '''tiny'''] _snake_case = self._large_tokenizer(_lowerCamelCase , padding=_lowerCamelCase , truncation=_lowerCamelCase , 
return_tensors='''pt''' ) _snake_case = self._large_tokenizer( text_target=_lowerCamelCase , max_length=5 , padding=_lowerCamelCase , truncation=_lowerCamelCase , return_tensors='''pt''' ) assert batch.input_ids.shape == (2, 1024) assert batch.attention_mask.shape == (2, 1024) assert targets["input_ids"].shape == (2, 5) assert len(_lowerCamelCase ) == 2 # input_ids, attention_mask. @slow def lowercase ( self : List[Any] ): # fmt: off _snake_case = {'''input_ids''': [[38979, 143, 18485, 606, 130, 26669, 87686, 121, 54189, 1129, 111, 26669, 87686, 121, 9114, 14787, 121, 13249, 158, 592, 956, 121, 14621, 31576, 143, 62613, 108, 9688, 930, 43430, 11562, 62613, 304, 108, 11443, 897, 108, 9314, 17415, 63399, 108, 11443, 7614, 18316, 118, 4284, 7148, 12430, 143, 1400, 25703, 158, 111, 4284, 7148, 11772, 143, 21297, 1064, 158, 122, 204, 3506, 1754, 1133, 14787, 1581, 115, 33224, 4482, 111, 1355, 110, 29173, 317, 50833, 108, 20147, 94665, 111, 77198, 107, 1], [110, 62613, 117, 638, 112, 1133, 121, 20098, 1355, 79050, 13872, 135, 1596, 53541, 1352, 141, 13039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 18289, 17780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=_lowerCamelCase , model_name='''google/bigbird-pegasus-large-arxiv''' , revision='''ba85d0851d708441f91440d509690f1ab6353415''' , ) @require_sentencepiece @require_tokenizers class lowerCAmelCase__ ( A_ , unittest.TestCase ): __a = PegasusTokenizer __a = PegasusTokenizerFast __a = True __a = True def lowercase ( self : Optional[Any] ): super().setUp() # We have a SentencePiece fixture for testing _snake_case = PegasusTokenizer(_lowerCamelCase , offset=0 , mask_token_sent=_lowerCamelCase , mask_token='''[MASK]''' ) tokenizer.save_pretrained(self.tmpdirname ) @cached_property def lowercase ( self : Optional[Any] ): return PegasusTokenizer.from_pretrained('''google/bigbird-pegasus-large-arxiv''' ) def lowercase ( self : Any , **_lowerCamelCase : int ): return PegasusTokenizer.from_pretrained(self.tmpdirname , **_lowerCamelCase ) def lowercase ( self : Dict , _lowerCamelCase : Dict ): return ("This is a test", "This is a test") def lowercase ( self : Tuple ): _snake_case = self.rust_tokenizer_class.from_pretrained(self.tmpdirname ) _snake_case = self.tokenizer_class.from_pretrained(self.tmpdirname ) _snake_case = ( '''Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>''' ''' <pad> <pad> <pad>''' ) _snake_case = rust_tokenizer([raw_input_str] , return_tensors=_lowerCamelCase , add_special_tokens=_lowerCamelCase ).input_ids[0] _snake_case = py_tokenizer([raw_input_str] , return_tensors=_lowerCamelCase , add_special_tokens=_lowerCamelCase ).input_ids[0] self.assertListEqual(_lowerCamelCase , 
_lowerCamelCase ) @require_torch def lowercase ( self : List[Any] ): _snake_case = ['''This is going to be way too long.''' * 1000, '''short example'''] _snake_case = ['''not super long but more than 5 tokens''', '''tiny'''] _snake_case = self._large_tokenizer(_lowerCamelCase , padding=_lowerCamelCase , truncation=_lowerCamelCase , return_tensors='''pt''' ) _snake_case = self._large_tokenizer( text_target=_lowerCamelCase , max_length=5 , padding=_lowerCamelCase , truncation=_lowerCamelCase , return_tensors='''pt''' ) assert batch.input_ids.shape == (2, 4096) assert batch.attention_mask.shape == (2, 4096) assert targets["input_ids"].shape == (2, 5) assert len(_lowerCamelCase ) == 2 # input_ids, attention_mask. def lowercase ( self : int ): _snake_case = ( '''This is an example string that is used to test the original TF implementation against the HF''' ''' implementation''' ) _snake_case = self._large_tokenizer(_lowerCamelCase ).input_ids self.assertListEqual( _lowerCamelCase , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 25016, 3137, 464, 109, 26955, 3137, 1] , )
288
"""simple docstring""" from __future__ import annotations import inspect import unittest import numpy as np from transformers import ResNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFResNetForImageClassification, TFResNetModel from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class lowerCAmelCase__ : def __init__( self : Dict , _lowerCamelCase : int , _lowerCamelCase : Optional[int]=3 , _lowerCamelCase : List[str]=32 , _lowerCamelCase : Optional[int]=3 , _lowerCamelCase : Dict=10 , _lowerCamelCase : Tuple=[10, 20, 30, 40] , _lowerCamelCase : int=[1, 1, 2, 1] , _lowerCamelCase : int=True , _lowerCamelCase : Optional[int]=True , _lowerCamelCase : Optional[int]="relu" , _lowerCamelCase : List[Any]=3 , _lowerCamelCase : Dict=None , ): _snake_case = parent _snake_case = batch_size _snake_case = image_size _snake_case = num_channels _snake_case = embeddings_size _snake_case = hidden_sizes _snake_case = depths _snake_case = is_training _snake_case = use_labels _snake_case = hidden_act _snake_case = num_labels _snake_case = scope _snake_case = len(_lowerCamelCase ) def lowercase ( self : Optional[int] ): _snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _snake_case = None if self.use_labels: _snake_case = ids_tensor([self.batch_size] , self.num_labels ) _snake_case = self.get_config() return config, pixel_values, labels def lowercase ( self : Tuple ): return ResNetConfig( num_channels=self.num_channels , 
embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , ) def lowercase ( self : List[Any] , _lowerCamelCase : List[str] , _lowerCamelCase : str , _lowerCamelCase : List[Any] ): _snake_case = TFResNetModel(config=_lowerCamelCase ) _snake_case = model(_lowerCamelCase ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def lowercase ( self : Dict , _lowerCamelCase : str , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Tuple ): _snake_case = self.num_labels _snake_case = TFResNetForImageClassification(_lowerCamelCase ) _snake_case = model(_lowerCamelCase , labels=_lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowercase ( self : Tuple ): _snake_case = self.prepare_config_and_inputs() _snake_case , _snake_case , _snake_case = config_and_inputs _snake_case = {'''pixel_values''': pixel_values} return config, inputs_dict @require_tf class lowerCAmelCase__ ( A_ , A_ , unittest.TestCase ): __a = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else () __a = ( {"""feature-extraction""": TFResNetModel, """image-classification""": TFResNetForImageClassification} if is_tf_available() else {} ) __a = False __a = False __a = False __a = False __a = False def lowercase ( self : List[Any] ): _snake_case = TFResNetModelTester(self ) _snake_case = ConfigTester(self , config_class=_lowerCamelCase , has_text_modality=_lowerCamelCase ) def lowercase ( self : Tuple ): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() 
self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def lowercase ( self : List[Any] ): return @unittest.skip(reason='''ResNet does not use inputs_embeds''' ) def lowercase ( self : Any ): pass @unittest.skip(reason='''ResNet does not support input and output embeddings''' ) def lowercase ( self : List[str] ): pass def lowercase ( self : int ): _snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _snake_case = model_class(_lowerCamelCase ) _snake_case = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _snake_case = [*signature.parameters.keys()] _snake_case = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , _lowerCamelCase ) def lowercase ( self : List[str] ): _snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowerCamelCase ) def lowercase ( self : Union[str, Any] ): def check_hidden_states_output(_lowerCamelCase : int , _lowerCamelCase : List[Any] , _lowerCamelCase : str ): _snake_case = model_class(_lowerCamelCase ) _snake_case = model(**self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) ) _snake_case = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states _snake_case = self.model_tester.num_stages self.assertEqual(len(_lowerCamelCase ) , expected_num_stages + 1 ) # ResNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) _snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common() _snake_case = ['''basic''', '''bottleneck'''] for model_class in self.all_model_classes: for layer_type in layers_type: _snake_case = layer_type _snake_case = 
True check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _snake_case = True check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) def lowercase ( self : Union[str, Any] ): _snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_lowerCamelCase ) @slow def lowercase ( self : List[str] ): for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _snake_case = TFResNetModel.from_pretrained(_lowerCamelCase ) self.assertIsNotNone(_lowerCamelCase ) def _UpperCAmelCase ( ) -> Union[str, Any]: _snake_case = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_tf @require_vision class lowerCAmelCase__ ( unittest.TestCase ): @cached_property def lowercase ( self : Dict ): return ( AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def lowercase ( self : List[Any] ): _snake_case = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) _snake_case = self.default_image_processor _snake_case = prepare_img() _snake_case = image_processor(images=_lowerCamelCase , return_tensors='''tf''' ) # forward pass _snake_case = model(**_lowerCamelCase ) # verify the logits _snake_case = tf.TensorShape((1, 1000) ) self.assertEqual(outputs.logits.shape , _lowerCamelCase ) _snake_case = tf.constant([-1_1.1_0_6_9, -9.7_8_7_7, -8.3_7_7_7] ) self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , _lowerCamelCase , atol=1e-4 ) )
288
1
"""simple docstring""" import copy import re class lowerCAmelCase__ : __a = """hp""" __a = {} __a = None @classmethod def lowercase ( cls : Optional[Any] , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Union[str, Any] ): _snake_case = prefix _snake_case = defaults cls.build_naming_info() @staticmethod def lowercase ( _lowerCamelCase : Dict , _lowerCamelCase : List[str] ): if len(_lowerCamelCase ) == 0: return "" _snake_case = None if any(char.isdigit() for char in word ): raise Exception(f'''Parameters should not contain numbers: \'{word}\' contains a number''' ) if word in info["short_word"]: return info["short_word"][word] for prefix_len in range(1 , len(_lowerCamelCase ) + 1 ): _snake_case = word[:prefix_len] if prefix in info["reverse_short_word"]: continue else: _snake_case = prefix break if short_word is None: # Paranoid fallback def int_to_alphabetic(_lowerCamelCase : List[str] ): _snake_case = '''''' while integer != 0: _snake_case = chr(ord('''A''' ) + integer % 10 ) + s integer //= 10 return s _snake_case = 0 while True: _snake_case = word + '''#''' + int_to_alphabetic(_lowerCamelCase ) if sword in info["reverse_short_word"]: continue else: _snake_case = sword break _snake_case = short_word _snake_case = word return short_word @staticmethod def lowercase ( _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Optional[Any] ): _snake_case = param_name.split('''_''' ) _snake_case = [TrialShortNamer.shortname_for_word(_lowerCamelCase , _lowerCamelCase ) for word in words] # We try to create a separatorless short name, but if there is a collision we have to fallback # to a separated short name _snake_case = ['''''', '''_'''] for separator in separators: _snake_case = separator.join(_lowerCamelCase ) if shortname not in info["reverse_short_param"]: _snake_case = shortname _snake_case = param_name return shortname return param_name @staticmethod def lowercase ( _lowerCamelCase : Any , _lowerCamelCase : Optional[Any] ): _snake_case = 
TrialShortNamer.shortname_for_key(_lowerCamelCase , _lowerCamelCase ) _snake_case = short_name _snake_case = param_name @classmethod def lowercase ( cls : List[Any] ): if cls.NAMING_INFO is not None: return _snake_case = { '''short_word''': {}, '''reverse_short_word''': {}, '''short_param''': {}, '''reverse_short_param''': {}, } _snake_case = list(cls.DEFAULTS.keys() ) for k in field_keys: cls.add_new_param_name(_lowerCamelCase , _lowerCamelCase ) _snake_case = info @classmethod def lowercase ( cls : List[Any] , _lowerCamelCase : Union[str, Any] ): cls.build_naming_info() assert cls.PREFIX is not None _snake_case = [copy.copy(cls.PREFIX )] for k, v in params.items(): if k not in cls.DEFAULTS: raise Exception(f'''You should provide a default value for the param name {k} with value {v}''' ) if v == cls.DEFAULTS[k]: # The default value is not added to the name continue _snake_case = cls.NAMING_INFO['''short_param'''][k] if isinstance(_lowerCamelCase , _lowerCamelCase ): _snake_case = 1 if v else 0 _snake_case = '''''' if isinstance(_lowerCamelCase , (int, float) ) else '''-''' _snake_case = f'''{key}{sep}{v}''' name.append(_lowerCamelCase ) return "_".join(_lowerCamelCase ) @classmethod def lowercase ( cls : str , _lowerCamelCase : Optional[int] ): _snake_case = repr[len(cls.PREFIX ) + 1 :] if repr == "": _snake_case = [] else: _snake_case = repr.split('''_''' ) _snake_case = {} for value in values: if "-" in value: _snake_case , _snake_case = value.split('''-''' ) else: _snake_case = re.sub('''[0-9.]''' , '''''' , _lowerCamelCase ) _snake_case = float(re.sub('''[^0-9.]''' , '''''' , _lowerCamelCase ) ) _snake_case = cls.NAMING_INFO['''reverse_short_param'''][p_k] _snake_case = p_v for k in cls.DEFAULTS: if k not in parameters: _snake_case = cls.DEFAULTS[k] return parameters
288
"""simple docstring""" # This script creates a super tiny model that is useful inside tests, when we just want to test that # the machinery works, without needing to the check the quality of the outcomes. # # This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny - # all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and # emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files. # The latter is done by `fsmt-make-super-tiny-model.py`. # # It will be used then as "stas/tiny-wmt19-en-ru" from pathlib import Path import json import tempfile from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES UpperCAmelCase__ = 'tiny-wmt19-en-ru' # Build # borrowed from a test UpperCAmelCase__ = [ 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'w</w>', 'r</w>', 't</w>', 'lo', 'low', 'er</w>', 'low</w>', 'lowest</w>', 'newer</w>', 'wider</w>', '<unk>', ] UpperCAmelCase__ = dict(zip(vocab, range(len(vocab)))) UpperCAmelCase__ = ['l o 123', 'lo w 1456', 'e r</w> 1789', ''] with tempfile.TemporaryDirectory() as tmpdirname: UpperCAmelCase__ = Path(tmpdirname) UpperCAmelCase__ = build_dir / VOCAB_FILES_NAMES['src_vocab_file'] UpperCAmelCase__ = build_dir / VOCAB_FILES_NAMES['tgt_vocab_file'] UpperCAmelCase__ = build_dir / VOCAB_FILES_NAMES['merges_file'] with open(src_vocab_file, 'w') as fp: fp.write(json.dumps(vocab_tokens)) with open(tgt_vocab_file, 'w') as fp: fp.write(json.dumps(vocab_tokens)) with open(merges_file, 'w') as fp: fp.write('\n'.join(merges)) UpperCAmelCase__ = FSMTTokenizer( langs=['en', 'ru'], src_vocab_size=len(vocab), tgt_vocab_size=len(vocab), src_vocab_file=src_vocab_file, tgt_vocab_file=tgt_vocab_file, merges_file=merges_file, ) UpperCAmelCase__ = FSMTConfig( langs=['ru', 'en'], src_vocab_size=1000, tgt_vocab_size=1000, d_model=4, 
encoder_layers=1, decoder_layers=1, encoder_ffn_dim=4, decoder_ffn_dim=4, encoder_attention_heads=1, decoder_attention_heads=1, ) UpperCAmelCase__ = FSMTForConditionalGeneration(config) print(F"num of params {tiny_model.num_parameters()}") # Test UpperCAmelCase__ = tokenizer(['Making tiny model'], return_tensors='pt') UpperCAmelCase__ = tiny_model(**batch) print('test output:', len(outputs.logits[0])) # Save tiny_model.half() # makes it smaller tiny_model.save_pretrained(mname_tiny) tokenizer.save_pretrained(mname_tiny) print(F"Generated {mname_tiny}") # Upload # transformers-cli upload tiny-wmt19-en-ru
288
1
"""simple docstring""" from __future__ import annotations import inspect import unittest import numpy as np from transformers import DeiTConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, TFDeiTModel, ) from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DeiTImageProcessor class lowerCAmelCase__ : def __init__( self : Dict , _lowerCamelCase : List[str] , _lowerCamelCase : List[str]=13 , _lowerCamelCase : Tuple=30 , _lowerCamelCase : Optional[int]=2 , _lowerCamelCase : List[Any]=3 , _lowerCamelCase : List[Any]=True , _lowerCamelCase : Dict=True , _lowerCamelCase : List[str]=32 , _lowerCamelCase : Union[str, Any]=2 , _lowerCamelCase : Optional[int]=4 , _lowerCamelCase : Optional[int]=37 , _lowerCamelCase : List[Any]="gelu" , _lowerCamelCase : Any=0.1 , _lowerCamelCase : Dict=0.1 , _lowerCamelCase : Tuple=10 , _lowerCamelCase : Dict=0.0_2 , _lowerCamelCase : List[str]=3 , _lowerCamelCase : List[str]=None , _lowerCamelCase : Optional[int]=2 , ): _snake_case = parent _snake_case = batch_size _snake_case = image_size _snake_case = patch_size _snake_case = num_channels _snake_case = is_training _snake_case = use_labels _snake_case = hidden_size _snake_case = num_hidden_layers _snake_case = num_attention_heads _snake_case = intermediate_size _snake_case = hidden_act _snake_case = hidden_dropout_prob _snake_case = attention_probs_dropout_prob _snake_case = type_sequence_label_size 
_snake_case = initializer_range _snake_case = scope _snake_case = encoder_stride # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens) _snake_case = (image_size // patch_size) ** 2 _snake_case = num_patches + 2 def lowercase ( self : Any ): _snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _snake_case = None if self.use_labels: _snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _snake_case = self.get_config() return config, pixel_values, labels def lowercase ( self : Tuple ): return DeiTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_lowerCamelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def lowercase ( self : Union[str, Any] , _lowerCamelCase : List[Any] , _lowerCamelCase : List[str] , _lowerCamelCase : Union[str, Any] ): _snake_case = TFDeiTModel(config=_lowerCamelCase ) _snake_case = model(_lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowercase ( self : str , _lowerCamelCase : int , _lowerCamelCase : Tuple , _lowerCamelCase : Any ): _snake_case = TFDeiTForMaskedImageModeling(config=_lowerCamelCase ) _snake_case = model(_lowerCamelCase ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images _snake_case = 1 _snake_case = TFDeiTForMaskedImageModeling(_lowerCamelCase ) _snake_case = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) 
_snake_case = model(_lowerCamelCase ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def lowercase ( self : str , _lowerCamelCase : str , _lowerCamelCase : Any , _lowerCamelCase : Union[str, Any] ): _snake_case = self.type_sequence_label_size _snake_case = TFDeiTForImageClassification(_lowerCamelCase ) _snake_case = model(_lowerCamelCase , labels=_lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images _snake_case = 1 _snake_case = TFDeiTForImageClassification(_lowerCamelCase ) _snake_case = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) _snake_case = model(_lowerCamelCase , labels=_lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def lowercase ( self : Optional[Any] ): _snake_case = self.prepare_config_and_inputs() _snake_case , _snake_case , _snake_case = config_and_inputs _snake_case = {'''pixel_values''': pixel_values} return config, inputs_dict @require_tf class lowerCAmelCase__ ( A_ , A_ , unittest.TestCase ): __a = ( ( TFDeiTModel, TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, ) if is_tf_available() else () ) __a = ( { """feature-extraction""": TFDeiTModel, """image-classification""": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher), } if is_tf_available() else {} ) __a = False __a = False __a = False __a = False def lowercase ( self : Dict ): _snake_case = TFDeiTModelTester(self ) _snake_case = ConfigTester(self , config_class=_lowerCamelCase , has_text_modality=_lowerCamelCase , hidden_size=37 ) def lowercase ( self : str ): self.config_tester.run_common_tests() @unittest.skip(reason='''DeiT does not use inputs_embeds''' ) def lowercase ( self : List[Any] ): pass def lowercase ( self : Union[str, Any] ): _snake_case , _snake_case = 
self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _snake_case = model_class(_lowerCamelCase ) self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) ) _snake_case = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_lowerCamelCase , tf.keras.layers.Dense ) ) def lowercase ( self : int ): _snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _snake_case = model_class(_lowerCamelCase ) _snake_case = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _snake_case = [*signature.parameters.keys()] _snake_case = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , _lowerCamelCase ) def lowercase ( self : Dict ): _snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowerCamelCase ) def lowercase ( self : Union[str, Any] ): _snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*_lowerCamelCase ) def lowercase ( self : int ): _snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_lowerCamelCase ) def lowercase ( self : Tuple , _lowerCamelCase : List[str] , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : List[str]=False ): _snake_case = super()._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase ) if return_labels: if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters: del inputs_dict["labels"] return inputs_dict @slow def lowercase ( self : Union[str, Any] ): for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _snake_case = TFDeiTModel.from_pretrained(_lowerCamelCase ) self.assertIsNotNone(_lowerCamelCase ) def _UpperCAmelCase ( ) -> Optional[int]: _snake_case = 
Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_tf @require_vision class lowerCAmelCase__ ( unittest.TestCase ): @cached_property def lowercase ( self : Optional[int] ): return ( DeiTImageProcessor.from_pretrained('''facebook/deit-base-distilled-patch16-224''' ) if is_vision_available() else None ) @slow def lowercase ( self : Dict ): _snake_case = TFDeiTForImageClassificationWithTeacher.from_pretrained('''facebook/deit-base-distilled-patch16-224''' ) _snake_case = self.default_image_processor _snake_case = prepare_img() _snake_case = image_processor(images=_lowerCamelCase , return_tensors='''tf''' ) # forward pass _snake_case = model(**_lowerCamelCase ) # verify the logits _snake_case = tf.TensorShape((1, 1000) ) self.assertEqual(outputs.logits.shape , _lowerCamelCase ) _snake_case = tf.constant([-1.0_2_6_6, 0.1_9_1_2, -1.2_8_6_1] ) self.assertTrue(np.allclose(outputs.logits[0, :3] , _lowerCamelCase , atol=1e-4 ) )
288
"""simple docstring"""


def solution(limit: int = 1_000_000) -> int:
    """Project Euler 135: count n < ~limit with exactly ten progressions.

    Count the values ``n`` for which ``x**2 - y**2 - z**2 = n`` has exactly
    ten solutions in positive integers with x, y, z a decreasing arithmetic
    progression.  Writing ``y`` for the middle term and ``d`` for the common
    difference, the equation reduces to ``n = y * (4*d - y)``, so solutions
    are enumerated by iterating ``y`` (``first_term``) over the divisors of n.

    :param limit: search bound for n (values 1..limit are counted)
    :return: how many n have exactly ten solutions
    """
    bound = limit + 1
    # frequency[n] counts the arithmetic-progression representations of n.
    frequency = [0] * bound
    for first_term in range(1, bound):
        # n runs over the multiples of first_term, so n // first_term is exact;
        # integer arithmetic avoids the float division of the naive version.
        for n in range(first_term, bound, first_term):
            common_difference = first_term + n // first_term  # y + n/y
            if common_difference % 4 != 0:
                # d = (y + n/y) / 4 must be an integer
                continue
            common_difference //= 4
            # z > 0 requires y > d; n > 0 requires y < 4d
            if common_difference < first_term < 4 * common_difference:
                frequency[n] += 1
    return sum(1 for x in frequency[1:bound] if x == 10)


if __name__ == "__main__":
    print(f"{solution() = }")
288
1
"""simple docstring"""
import argparse

import torch
from safetensors.torch import load_file

from diffusers import StableDiffusionPipeline


def convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha):
    """Merge a safetensors LoRA checkpoint into a diffusers Stable Diffusion pipeline.

    For every LoRA pair (``lora_up`` x ``lora_down``) the corresponding base
    weight W is updated in place as ``W += alpha * up @ down``.

    :param base_model_path: local path / hub id of the base model in diffusers format
    :param checkpoint_path: path to the ``.safetensors`` LoRA checkpoint
    :param lora_prefix_unet: key prefix used for UNet weights in the checkpoint
    :param lora_prefix_text_encoder: key prefix used for text-encoder weights
    :param alpha: merging ratio in ``W = W0 + alpha * deltaW``
    :return: the pipeline with LoRA weights folded into its parameters
    """
    # load base model
    pipeline = StableDiffusionPipeline.from_pretrained(base_model_path, torch_dtype=torch.float32)

    # load LoRA weight from .safetensors
    state_dict = load_file(checkpoint_path)

    visited = []

    # directly update weight in diffusers model
    for key in state_dict:
        # it is suggested to print out the key, it usually will be something like below
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"

        # as we have set the alpha beforehand, so just skip
        if ".alpha" in key or key in visited:
            continue

        if "text" in key:
            layer_infos = key.split(".")[0].split(lora_prefix_text_encoder + "_")[-1].split("_")
            curr_layer = pipeline.text_encoder
        else:
            layer_infos = key.split(".")[0].split(lora_prefix_unet + "_")[-1].split("_")
            curr_layer = pipeline.unet

        # find the target layer: greedily descend attribute by attribute,
        # re-joining name fragments with "_" when a partial name is missing
        temp_name = layer_infos.pop(0)
        while len(layer_infos) > -1:
            try:
                curr_layer = curr_layer.__getattr__(temp_name)
                if len(layer_infos) > 0:
                    temp_name = layer_infos.pop(0)
                elif len(layer_infos) == 0:
                    break
            except Exception:
                if len(temp_name) > 0:
                    temp_name += "_" + layer_infos.pop(0)
                else:
                    temp_name = layer_infos.pop(0)

        # collect the (up, down) pair for this layer, up first
        pair_keys = []
        if "lora_down" in key:
            pair_keys.append(key.replace("lora_down", "lora_up"))
            pair_keys.append(key)
        else:
            pair_keys.append(key)
            pair_keys.append(key.replace("lora_up", "lora_down"))

        # update weight (conv weights are stored 4-D; drop the trailing 1x1 dims)
        if len(state_dict[pair_keys[0]].shape) == 4:
            weight_up = state_dict[pair_keys[0]].squeeze(3).squeeze(2).to(torch.float32)
            weight_down = state_dict[pair_keys[1]].squeeze(3).squeeze(2).to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down).unsqueeze(2).unsqueeze(3)
        else:
            weight_up = state_dict[pair_keys[0]].to(torch.float32)
            weight_down = state_dict[pair_keys[1]].to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down)

        # update visited list
        for item in pair_keys:
            visited.append(item)

    return pipeline


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--base_model_path", default=None, type=str, required=True, help="Path to the base model in diffusers format."
    )
    parser.add_argument(
        "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
    )
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--lora_prefix_unet", default="lora_unet", type=str, help="The prefix of UNet weight in safetensors"
    )
    parser.add_argument(
        "--lora_prefix_text_encoder",
        default="lora_te",
        type=str,
        help="The prefix of text encoder weight in safetensors",
    )
    parser.add_argument("--alpha", default=0.75, type=float, help="The merging ratio in W = W0 + alpha * deltaW")
    parser.add_argument(
        "--to_safetensors", action="store_true", help="Whether to store pipeline in safetensors format or not."
    )
    parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")

    args = parser.parse_args()

    base_model_path = args.base_model_path
    checkpoint_path = args.checkpoint_path
    dump_path = args.dump_path
    lora_prefix_unet = args.lora_prefix_unet
    lora_prefix_text_encoder = args.lora_prefix_text_encoder
    alpha = args.alpha

    pipe = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
    pipe = pipe.to(args.device)
    pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
288
"""simple docstring"""
import math

import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute


def quantum_fourier_transform(number_of_qubits: int = 3) -> qiskit.result.counts.Counts:
    """Build and simulate a Quantum Fourier Transform circuit.

    Applies the textbook QFT (a Hadamard on each qubit followed by
    controlled-phase rotations of decreasing angle, then the
    order-reversing swaps), measures every qubit, and returns the counts
    from 10 000 shots on the qasm simulator.

    :param number_of_qubits: circuit width, 1..10
    :raises TypeError: if ``number_of_qubits`` is a string
    :raises ValueError: if it is <= 0, not an exact integer, or > 10
    """
    # NOTE: the obfuscated original had `isinstance(x, x)`, which raised
    # TypeError for every integer input; the intended check is against str.
    if isinstance(number_of_qubits, str):
        raise TypeError("number of qubits must be a integer.")
    if number_of_qubits <= 0:
        raise ValueError("number of qubits must be > 0.")
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError("number of qubits must be exact integer.")
    if number_of_qubits > 10:
        raise ValueError("number of qubits too large to simulate(>10).")

    qr = QuantumRegister(number_of_qubits, "qr")
    cr = ClassicalRegister(number_of_qubits, "cr")
    quantum_circuit = QuantumCircuit(qr, cr)

    counter = number_of_qubits
    for i in range(number_of_qubits):
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        # controlled-phase rotation pi / 2**(counter - j) from qubit j onto `counter`
        for j in range(counter):
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)
    # reverse qubit order, as required at the end of the QFT
    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k, number_of_qubits - k - 1)

    # measure all the qubits
    quantum_circuit.measure(qr, cr)
    # simulate with 10000 shots
    backend = Aer.get_backend("qasm_simulator")
    job = execute(quantum_circuit, backend, shots=10_000)

    return job.result().get_counts(quantum_circuit)


if __name__ == "__main__":
    print(
        f"Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}"
    )
288
1
"""simple docstring"""
import gc
import unittest

from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
    is_pipeline_test,
    is_torch_available,
    nested_simplify,
    require_tf,
    require_torch,
    require_torch_gpu,
    slow,
)

from .test_pipelines_common import ANY


def _any_prediction(count):
    # Expected shape of one fill-mask output: `count` dicts whose fields are
    # only type-checked (exact values depend on the randomly-initialized model).
    return [
        {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}
        for _ in range(count)
    ]


@is_pipeline_test
class FillMaskPipelineTests(unittest.TestCase):
    """Tests for the fill-mask pipeline (tiny and full-size checkpoints)."""

    model_mapping = MODEL_FOR_MASKED_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_MASKED_LM_MAPPING

    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        if is_torch_available():
            import torch

            torch.cuda.empty_cache()

    @require_tf
    def test_small_model_tf(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="tf")
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is grouped", "score": 2.1e-05, "token": 38015, "token_str": " grouped"},
                {"sequence": "My name is accuser", "score": 2.1e-05, "token": 25506, "token_str": " accuser"},
            ],
        )

        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {
                    "sequence": "The largest city in France is grouped",
                    "score": 2.1e-05,
                    "token": 38015,
                    "token_str": " grouped",
                },
                {
                    "sequence": "The largest city in France is accuser",
                    "score": 2.1e-05,
                    "token": 25506,
                    "token_str": " accuser",
                },
            ],
        )

        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
                {"sequence": "My name is Patrick", "score": 2e-05, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Te", "score": 1.9e-05, "token": 2941, "token_str": " Te"},
            ],
        )

    @require_torch
    def test_small_model_pt(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="pt")
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Maul", "score": 2.2e-05, "token": 35676, "token_str": " Maul"},
                {"sequence": "My name isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
            ],
        )

        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {
                    "sequence": "The largest city in France is Maul",
                    "score": 2.2e-05,
                    "token": 35676,
                    "token_str": " Maul",
                },
                {"sequence": "The largest city in France isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
            ],
        )

        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Patrick", "score": 2.1e-05, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Te", "score": 2e-05, "token": 2941, "token_str": " Te"},
                {"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
            ],
        )

        outputs = unmasker("My name is <mask> <mask>", top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                [
                    {
                        "score": 2.2e-05,
                        "token": 35676,
                        "token_str": " Maul",
                        "sequence": "<s>My name is Maul<mask></s>",
                    },
                    {"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name isELS<mask></s>"},
                ],
                [
                    {
                        "score": 2.2e-05,
                        "token": 35676,
                        "token_str": " Maul",
                        "sequence": "<s>My name is<mask> Maul</s>",
                    },
                    {"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name is<mask>ELS</s>"},
                ],
            ],
        )

    @require_torch_gpu
    def test_fp16_casting(self):
        pipe = pipeline("fill-mask", model="hf-internal-testing/tiny-random-distilbert", device=0, framework="pt")

        # convert model to fp16
        pipe.model.half()

        response = pipe("Paris is the [MASK] of France.")
        # We actually don't care about the result, we just want to make sure
        # it works, meaning the float16 tensor got casted back to float32
        # for postprocessing.
        self.assertIsInstance(response, list)

    @slow
    @require_torch
    def test_large_model_pt(self):
        unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="pt")
        self.run_large_test(unmasker)

    @slow
    @require_tf
    def test_large_model_tf(self):
        unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="tf")
        self.run_large_test(unmasker)

    def run_large_test(self, unmasker):
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"sequence": "My name is John", "score": 0.008, "token": 610, "token_str": " John"},
                {"sequence": "My name is Chris", "score": 0.007, "token": 1573, "token_str": " Chris"},
            ],
        )

        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs),
            [
                {
                    "sequence": "The largest city in France is Paris",
                    "score": 0.251,
                    "token": 2201,
                    "token_str": " Paris",
                },
                {
                    "sequence": "The largest city in France is Lyon",
                    "score": 0.214,
                    "token": 12790,
                    "token_str": " Lyon",
                },
            ],
        )

        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"sequence": "My name is Patrick", "score": 0.005, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Clara", "score": 0.000, "token": 13606, "token_str": " Clara"},
                {"sequence": "My name is Te", "score": 0.000, "token": 2941, "token_str": " Te"},
            ],
        )

    @require_torch
    def test_model_no_pad_pt(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="pt")
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker, [])

    @require_tf
    def test_model_no_pad_tf(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="tf")
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker, [])

    def get_test_pipeline(self, model, tokenizer, processor):
        if tokenizer is None or tokenizer.mask_token_id is None:
            self.skipTest("The provided tokenizer has no mask token, (probably reformer or wav2vec2)")

        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        examples = [
            f"This is another {tokenizer.mask_token} test",
        ]
        return fill_masker, examples

    def run_pipeline_test(self, fill_masker, examples):
        tokenizer = fill_masker.tokenizer
        model = fill_masker.model

        outputs = fill_masker(
            f"This is a {tokenizer.mask_token}",
        )
        self.assertEqual(outputs, _any_prediction(5))

        outputs = fill_masker([f"This is a {tokenizer.mask_token}"])
        self.assertEqual(outputs, _any_prediction(5))

        outputs = fill_masker([f"This is a {tokenizer.mask_token}", f"Another {tokenizer.mask_token} great test."])
        self.assertEqual(outputs, [_any_prediction(5), _any_prediction(5)])

        with self.assertRaises(ValueError):
            fill_masker([None])
        # No mask_token is not supported
        with self.assertRaises(PipelineException):
            fill_masker("This is")

        self.run_test_top_k(model, tokenizer)
        self.run_test_targets(model, tokenizer)
        self.run_test_top_k_targets(model, tokenizer)
        self.fill_mask_with_duplicate_targets_and_top_k(model, tokenizer)
        self.fill_mask_with_multiple_masks(model, tokenizer)

    def run_test_targets(self, model, tokenizer):
        vocab = tokenizer.get_vocab()
        targets = sorted(vocab.keys())[:2]
        # Pipeline argument
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, targets=targets)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}")
        self.assertEqual(outputs, _any_prediction(2))
        target_ids = {vocab[el] for el in targets}
        self.assertEqual({el["token"] for el in outputs}, target_ids)
        processed_targets = [tokenizer.decode([x]) for x in target_ids]
        self.assertEqual({el["token_str"] for el in outputs}, set(processed_targets))

        # Call argument
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=targets)
        self.assertEqual(outputs, _any_prediction(2))
        target_ids = {vocab[el] for el in targets}
        self.assertEqual({el["token"] for el in outputs}, target_ids)
        processed_targets = [tokenizer.decode([x]) for x in target_ids]
        self.assertEqual({el["token_str"] for el in outputs}, set(processed_targets))

        # Score equivalence
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=targets)
        tokens = [top_mask["token_str"] for top_mask in outputs]
        scores = [top_mask["score"] for top_mask in outputs]

        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(tokens) == set(targets):
            unmasked_targets = fill_masker(f"This is a {tokenizer.mask_token}", targets=tokens)
            target_scores = [top_mask["score"] for top_mask in unmasked_targets]
            self.assertEqual(nested_simplify(scores), nested_simplify(target_scores))

        # Raises with invalid
        with self.assertRaises(ValueError):
            outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=[])
        # For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised
        if "" not in tokenizer.get_vocab():
            with self.assertRaises(ValueError):
                outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=[""])
        with self.assertRaises(ValueError):
            outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets="")

    def run_test_top_k(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, top_k=2)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}")
        self.assertEqual(outputs, _any_prediction(2))

        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2)
        self.assertEqual(outputs2, _any_prediction(2))
        self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2))

    def run_test_top_k_targets(self, model, tokenizer):
        vocab = tokenizer.get_vocab()
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)

        # top_k=2, ntargets=3
        targets = sorted(vocab.keys())[:3]
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2, targets=targets)

        # If we use the most probably targets, and filter differently, we should still
        # have the same results
        targets2 = [el["token_str"] for el in sorted(outputs, key=lambda x: x["score"], reverse=True)]
        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(targets2).issubset(targets):
            outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=3, targets=targets2)
            # They should yield exactly the same result
            self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2))

    def fill_mask_with_duplicate_targets_and_top_k(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        vocab = tokenizer.get_vocab()
        # String duplicates + id duplicates
        targets = sorted(vocab.keys())[:3]
        targets = [targets[0], targets[1], targets[0], targets[2], targets[1]]
        outputs = fill_masker(f"My name is {tokenizer.mask_token}", targets=targets, top_k=10)

        # The target list contains duplicates, so we can't output more
        # than them
        self.assertEqual(len(outputs), 3)

    def fill_mask_with_multiple_masks(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs = fill_masker(
            f"This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}", top_k=2
        )
        self.assertEqual(
            outputs,
            [_any_prediction(2), _any_prediction(2), _any_prediction(2)],
        )
288
"""simple docstring"""
import argparse
import os
from pathlib import Path

import fairseq
import torch
from packaging import version
from torch import nn

from transformers import (
    BartConfig,
    BartForConditionalGeneration,
    BartForSequenceClassification,
    BartModel,
    BartTokenizer,
)
from transformers.utils import logging


FAIRSEQ_MODELS = ["bart.large", "bart.large.mnli", "bart.large.cnn", "bart_xsum/model.pt"]
extra_arch = {"bart.large": BartModel, "bart.large.mnli": BartForSequenceClassification}
if version.parse(fairseq.__version__) < version.parse("0.9.0"):
    raise Exception("requires fairseq >= 0.9.0")


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = " Hello world! cécé herlolip"

mnli_rename_keys = [
    ("model.classification_heads.mnli.dense.weight", "classification_head.dense.weight"),
    ("model.classification_heads.mnli.dense.bias", "classification_head.dense.bias"),
    ("model.classification_heads.mnli.out_proj.weight", "classification_head.out_proj.weight"),
    ("model.classification_heads.mnli.out_proj.bias", "classification_head.out_proj.bias"),
]


def remove_ignore_keys_(state_dict):
    """Drop fairseq bookkeeping entries that have no transformers equivalent."""
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    """Move ``dct[old]`` to ``dct[new]`` in place."""
    val = dct.pop(old)
    dct[new] = val


def load_xsum_checkpoint(checkpoint_path):
    """Load a local fairseq model.pt into the bart.large.cnn hub interface."""
    sd = torch.load(checkpoint_path, map_location="cpu")
    hub_interface = torch.hub.load("pytorch/fairseq", "bart.large.cnn").eval()
    hub_interface.model.load_state_dict(sd["model"])
    return hub_interface


def make_linear_from_emb(emb):
    """Build an untied LM head whose weight is the embedding matrix."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


@torch.no_grad()
def convert_bart_checkpoint(checkpoint_path, pytorch_dump_folder_path, hf_checkpoint_name=None):
    """
    Copy/paste/tweak a fairseq BART checkpoint into a transformers model,
    verify the two produce identical outputs on SAMPLE_TEXT, then save it.
    """
    if not os.path.exists(checkpoint_path):
        bart = torch.hub.load("pytorch/fairseq", checkpoint_path).eval()
    else:
        bart = load_xsum_checkpoint(checkpoint_path)

    bart.model.upgrade_state_dict(bart.model.state_dict())
    if hf_checkpoint_name is None:
        hf_checkpoint_name = checkpoint_path.replace(".", "-")
    config = BartConfig.from_pretrained(hf_checkpoint_name)
    tokens = bart.encode(SAMPLE_TEXT).unsqueeze(0)
    tokensa = BartTokenizer.from_pretrained(hf_checkpoint_name).encode(SAMPLE_TEXT, return_tensors="pt").unsqueeze(0)
    if not torch.eq(tokens, tokensa).all():
        raise ValueError(
            f"converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokensa}"
        )

    if checkpoint_path == "bart.large.mnli":
        state_dict = bart.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["model.shared.weight"] = state_dict["model.decoder.embed_tokens.weight"]
        for src, dest in mnli_rename_keys:
            rename_key(state_dict, src, dest)
        model = BartForSequenceClassification(config).eval()
        model.load_state_dict(state_dict)
        fairseq_output = bart.predict("mnli", tokens, return_logits=True)
        new_model_outputs = model(tokens)[0]  # logits
    else:  # no classification heads to worry about
        state_dict = bart.model.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
        fairseq_output = bart.extract_features(tokens)
        if hf_checkpoint_name == "facebook/bart-large":
            model = BartModel(config).eval()
            model.load_state_dict(state_dict)
            new_model_outputs = model(tokens).model[0]
        else:
            model = BartForConditionalGeneration(config).eval()  # an existing summarization ckpt
            model.model.load_state_dict(state_dict)
            if hasattr(model, "lm_head"):
                model.lm_head = make_linear_from_emb(model.model.shared)
            new_model_outputs = model.model(tokens)[0]

    # Check results
    if fairseq_output.shape != new_model_outputs.shape:
        raise ValueError(
            f"`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}"
        )
    if (fairseq_output != new_model_outputs).any().item():
        raise ValueError("Some values in `fairseq_output` are different from `new_model_outputs`")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
    )
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--hf_config", default=None, type=str, help="Which huggingface architecture to use: bart-large-xsum"
    )
    args = parser.parse_args()
    convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
288
1
"""simple docstring""" from typing import Optional import numpy as np import torch from torch import nn from transformers import GPTaConfig, GPTaLMHeadModel from transformers.modeling_utils import ModuleUtilsMixin from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin class lowerCAmelCase__ ( A_ , A_ , A_ ): __a = [R"""h\.\d+\.attn\.bias""", R"""h\.\d+\.attn\.masked_bias"""] @register_to_config def __init__( self : Any , _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : Optional[int] = None , _lowerCamelCase : int = 50257 , _lowerCamelCase : int = 1024 , _lowerCamelCase : int = 768 , _lowerCamelCase : int = 12 , _lowerCamelCase : int = 12 , _lowerCamelCase : Optional[int] = None , _lowerCamelCase : str = "gelu_new" , _lowerCamelCase : float = 0.1 , _lowerCamelCase : float = 0.1 , _lowerCamelCase : float = 0.1 , _lowerCamelCase : float = 1e-5 , _lowerCamelCase : float = 0.0_2 , _lowerCamelCase : bool = True , _lowerCamelCase : bool = True , _lowerCamelCase : bool = False , _lowerCamelCase : bool = False , ): super().__init__() _snake_case = prefix_length if prefix_inner_dim != n_embd and prefix_hidden_dim is None: raise ValueError( f'''`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and''' f''' `n_embd`: {n_embd} are not equal.''' ) _snake_case = prefix_inner_dim _snake_case = prefix_hidden_dim _snake_case = ( nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim ) if self.prefix_hidden_dim is not None else nn.Identity() ) _snake_case = ( nn.Linear(self.prefix_hidden_dim , _lowerCamelCase ) if self.prefix_hidden_dim is not None else nn.Identity() ) _snake_case = GPTaConfig( vocab_size=_lowerCamelCase , n_positions=_lowerCamelCase , n_embd=_lowerCamelCase , n_layer=_lowerCamelCase , n_head=_lowerCamelCase , n_inner=_lowerCamelCase , activation_function=_lowerCamelCase , resid_pdrop=_lowerCamelCase , embd_pdrop=_lowerCamelCase , attn_pdrop=_lowerCamelCase , 
layer_norm_epsilon=_lowerCamelCase , initializer_range=_lowerCamelCase , scale_attn_weights=_lowerCamelCase , use_cache=_lowerCamelCase , scale_attn_by_inverse_layer_idx=_lowerCamelCase , reorder_and_upcast_attn=_lowerCamelCase , ) _snake_case = GPTaLMHeadModel(_lowerCamelCase ) def lowercase ( self : Optional[int] , _lowerCamelCase : torch.Tensor , _lowerCamelCase : torch.Tensor , _lowerCamelCase : Optional[torch.Tensor] = None , _lowerCamelCase : Optional[torch.Tensor] = None , ): _snake_case = self.transformer.transformer.wte(_lowerCamelCase ) _snake_case = self.encode_prefix(_lowerCamelCase ) _snake_case = self.decode_prefix(_lowerCamelCase ) _snake_case = torch.cat((prefix_embeds, embedding_text) , dim=1 ) if labels is not None: _snake_case = self.get_dummy_token(input_ids.shape[0] , input_ids.device ) _snake_case = torch.cat((dummy_token, input_ids) , dim=1 ) _snake_case = self.transformer(inputs_embeds=_lowerCamelCase , labels=_lowerCamelCase , attention_mask=_lowerCamelCase ) if self.prefix_hidden_dim is not None: return out, hidden else: return out def lowercase ( self : Any , _lowerCamelCase : int , _lowerCamelCase : torch.device ): return torch.zeros(_lowerCamelCase , self.prefix_length , dtype=torch.intaa , device=_lowerCamelCase ) def lowercase ( self : int , _lowerCamelCase : Any ): return self.encode_prefix(_lowerCamelCase ) @torch.no_grad() def lowercase ( self : List[Any] , _lowerCamelCase : Optional[int] , _lowerCamelCase : Dict , _lowerCamelCase : Tuple ): _snake_case = torch.split(_lowerCamelCase , 1 , dim=0 ) _snake_case = [] _snake_case = [] for feature in features: _snake_case = self.decode_prefix(feature.to(_lowerCamelCase ) ) # back to the clip feature # Only support beam search for now _snake_case , _snake_case = self.generate_beam( input_embeds=_lowerCamelCase , device=_lowerCamelCase , eos_token_id=_lowerCamelCase ) generated_tokens.append(output_tokens[0] ) generated_seq_lengths.append(seq_lengths[0] ) _snake_case = 
torch.stack(_lowerCamelCase ) _snake_case = torch.stack(_lowerCamelCase ) return generated_tokens, generated_seq_lengths @torch.no_grad() def lowercase ( self : Union[str, Any] , _lowerCamelCase : Optional[int]=None , _lowerCamelCase : List[Any]=None , _lowerCamelCase : Optional[int]=None , _lowerCamelCase : int = 5 , _lowerCamelCase : int = 67 , _lowerCamelCase : float = 1.0 , _lowerCamelCase : Optional[int] = None , ): _snake_case = eos_token_id _snake_case = None _snake_case = None _snake_case = torch.ones(_lowerCamelCase , device=_lowerCamelCase , dtype=torch.int ) _snake_case = torch.zeros(_lowerCamelCase , device=_lowerCamelCase , dtype=torch.bool ) if input_embeds is not None: _snake_case = input_embeds else: _snake_case = self.transformer.transformer.wte(_lowerCamelCase ) for i in range(_lowerCamelCase ): _snake_case = self.transformer(inputs_embeds=_lowerCamelCase ) _snake_case = outputs.logits _snake_case = logits[:, -1, :] / (temperature if temperature > 0 else 1.0) _snake_case = logits.softmax(-1 ).log() if scores is None: _snake_case , _snake_case = logits.topk(_lowerCamelCase , -1 ) _snake_case = generated.expand(_lowerCamelCase , *generated.shape[1:] ) _snake_case , _snake_case = next_tokens.permute(1 , 0 ), scores.squeeze(0 ) if tokens is None: _snake_case = next_tokens else: _snake_case = tokens.expand(_lowerCamelCase , *tokens.shape[1:] ) _snake_case = torch.cat((tokens, next_tokens) , dim=1 ) else: _snake_case = -float(np.inf ) _snake_case = 0 _snake_case = scores[:, None] + logits seq_lengths[~is_stopped] += 1 _snake_case = scores_sum / seq_lengths[:, None] _snake_case , _snake_case = scores_sum_average.view(-1 ).topk(_lowerCamelCase , -1 ) _snake_case = next_tokens // scores_sum.shape[1] _snake_case = seq_lengths[next_tokens_source] _snake_case = next_tokens % scores_sum.shape[1] _snake_case = next_tokens.unsqueeze(1 ) _snake_case = tokens[next_tokens_source] _snake_case = torch.cat((tokens, next_tokens) , dim=1 ) _snake_case = 
generated[next_tokens_source] _snake_case = scores_sum_average * seq_lengths _snake_case = is_stopped[next_tokens_source] _snake_case = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 ) _snake_case = torch.cat((generated, next_token_embed) , dim=1 ) _snake_case = is_stopped + next_tokens.eq(_lowerCamelCase ).squeeze() if is_stopped.all(): break _snake_case = scores / seq_lengths _snake_case = scores.argsort(descending=_lowerCamelCase ) # tokens tensors are already padded to max_seq_length _snake_case = [tokens[i] for i in order] _snake_case = torch.stack(_lowerCamelCase , dim=0 ) _snake_case = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype ) return output_texts, seq_lengths
288
"""simple docstring""" def _UpperCAmelCase ( __lowerCamelCase : Tuple ) -> Any: stooge(__lowerCamelCase , 0 , len(__lowerCamelCase ) - 1 ) return arr def _UpperCAmelCase ( __lowerCamelCase : str , __lowerCamelCase : List[str] , __lowerCamelCase : str ) -> int: if i >= h: return # If first element is smaller than the last then swap them if arr[i] > arr[h]: _snake_case , _snake_case = arr[h], arr[i] # If there are more than 2 elements in the array if h - i + 1 > 2: _snake_case = (int)((h - i + 1) / 3 ) # Recursively sort first 2/3 elements stooge(__lowerCamelCase , __lowerCamelCase , (h - t) ) # Recursively sort last 2/3 elements stooge(__lowerCamelCase , i + t , (__lowerCamelCase) ) # Recursively sort first 2/3 elements stooge(__lowerCamelCase , __lowerCamelCase , (h - t) ) if __name__ == "__main__": UpperCAmelCase__ = input('Enter numbers separated by a comma:\n').strip() UpperCAmelCase__ = [int(item) for item in user_input.split(',')] print(stooge_sort(unsorted))
288
1
"""simple docstring""" import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_bert import BertTokenizer UpperCAmelCase__ = logging.get_logger(__name__) UpperCAmelCase__ = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'} UpperCAmelCase__ = { 'vocab_file': { 'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt', 'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt', 'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/vocab.txt', 'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/vocab.txt', 'bert-base-multilingual-uncased': ( 'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt' ), 'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt', 'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt', 'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt', 'bert-large-uncased-whole-word-masking': ( 'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt' ), 'bert-large-cased-whole-word-masking': ( 'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt' ), 'bert-large-uncased-whole-word-masking-finetuned-squad': ( 'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt' ), 'bert-large-cased-whole-word-masking-finetuned-squad': ( 'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt' ), 'bert-base-cased-finetuned-mrpc': ( 'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt' ), 'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt', 
'bert-base-german-dbmdz-uncased': ( 'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt' ), 'TurkuNLP/bert-base-finnish-cased-v1': ( 'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt' ), 'TurkuNLP/bert-base-finnish-uncased-v1': ( 'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt' ), 'wietsedv/bert-base-dutch-cased': ( 'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json', 'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json', 'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json', 'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json', 'bert-base-multilingual-uncased': ( 'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json' ), 'bert-base-multilingual-cased': ( 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json' ), 'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json', 'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json', 'bert-large-uncased-whole-word-masking': ( 'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json' ), 'bert-large-cased-whole-word-masking': ( 'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json' ), 'bert-large-uncased-whole-word-masking-finetuned-squad': ( 'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json' ), 'bert-large-cased-whole-word-masking-finetuned-squad': ( 'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json' ), 'bert-base-cased-finetuned-mrpc': ( 
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json' ), 'bert-base-german-dbmdz-cased': ( 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json' ), 'bert-base-german-dbmdz-uncased': ( 'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json' ), 'TurkuNLP/bert-base-finnish-cased-v1': ( 'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json' ), 'TurkuNLP/bert-base-finnish-uncased-v1': ( 'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json' ), 'wietsedv/bert-base-dutch-cased': ( 'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json' ), }, } UpperCAmelCase__ = { 'bert-base-uncased': 512, 'bert-large-uncased': 512, 'bert-base-cased': 512, 'bert-large-cased': 512, 'bert-base-multilingual-uncased': 512, 'bert-base-multilingual-cased': 512, 'bert-base-chinese': 512, 'bert-base-german-cased': 512, 'bert-large-uncased-whole-word-masking': 512, 'bert-large-cased-whole-word-masking': 512, 'bert-large-uncased-whole-word-masking-finetuned-squad': 512, 'bert-large-cased-whole-word-masking-finetuned-squad': 512, 'bert-base-cased-finetuned-mrpc': 512, 'bert-base-german-dbmdz-cased': 512, 'bert-base-german-dbmdz-uncased': 512, 'TurkuNLP/bert-base-finnish-cased-v1': 512, 'TurkuNLP/bert-base-finnish-uncased-v1': 512, 'wietsedv/bert-base-dutch-cased': 512, } UpperCAmelCase__ = { 'bert-base-uncased': {'do_lower_case': True}, 'bert-large-uncased': {'do_lower_case': True}, 'bert-base-cased': {'do_lower_case': False}, 'bert-large-cased': {'do_lower_case': False}, 'bert-base-multilingual-uncased': {'do_lower_case': True}, 'bert-base-multilingual-cased': {'do_lower_case': False}, 'bert-base-chinese': {'do_lower_case': False}, 'bert-base-german-cased': {'do_lower_case': False}, 'bert-large-uncased-whole-word-masking': {'do_lower_case': True}, 'bert-large-cased-whole-word-masking': {'do_lower_case': False}, 
'bert-large-uncased-whole-word-masking-finetuned-squad': {'do_lower_case': True}, 'bert-large-cased-whole-word-masking-finetuned-squad': {'do_lower_case': False}, 'bert-base-cased-finetuned-mrpc': {'do_lower_case': False}, 'bert-base-german-dbmdz-cased': {'do_lower_case': False}, 'bert-base-german-dbmdz-uncased': {'do_lower_case': True}, 'TurkuNLP/bert-base-finnish-cased-v1': {'do_lower_case': False}, 'TurkuNLP/bert-base-finnish-uncased-v1': {'do_lower_case': True}, 'wietsedv/bert-base-dutch-cased': {'do_lower_case': False}, } class lowerCAmelCase__ ( A_ ): __a = VOCAB_FILES_NAMES __a = PRETRAINED_VOCAB_FILES_MAP __a = PRETRAINED_INIT_CONFIGURATION __a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __a = BertTokenizer def __init__( self : Dict , _lowerCamelCase : Any=None , _lowerCamelCase : Optional[Any]=None , _lowerCamelCase : List[str]=True , _lowerCamelCase : str="[UNK]" , _lowerCamelCase : List[str]="[SEP]" , _lowerCamelCase : List[Any]="[PAD]" , _lowerCamelCase : List[str]="[CLS]" , _lowerCamelCase : int="[MASK]" , _lowerCamelCase : Tuple=True , _lowerCamelCase : str=None , **_lowerCamelCase : Union[str, Any] , ): super().__init__( _lowerCamelCase , tokenizer_file=_lowerCamelCase , do_lower_case=_lowerCamelCase , unk_token=_lowerCamelCase , sep_token=_lowerCamelCase , pad_token=_lowerCamelCase , cls_token=_lowerCamelCase , mask_token=_lowerCamelCase , tokenize_chinese_chars=_lowerCamelCase , strip_accents=_lowerCamelCase , **_lowerCamelCase , ) _snake_case = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('''lowercase''' , _lowerCamelCase ) != do_lower_case or normalizer_state.get('''strip_accents''' , _lowerCamelCase ) != strip_accents or normalizer_state.get('''handle_chinese_chars''' , _lowerCamelCase ) != tokenize_chinese_chars ): _snake_case = getattr(_lowerCamelCase , normalizer_state.pop('''type''' ) ) _snake_case = do_lower_case _snake_case = strip_accents _snake_case = tokenize_chinese_chars _snake_case = 
normalizer_class(**_lowerCamelCase ) _snake_case = do_lower_case def lowercase ( self : Union[str, Any] , _lowerCamelCase : Any , _lowerCamelCase : str=None ): _snake_case = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def lowercase ( self : Tuple , _lowerCamelCase : List[int] , _lowerCamelCase : Optional[List[int]] = None ): _snake_case = [self.sep_token_id] _snake_case = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def lowercase ( self : Tuple , _lowerCamelCase : str , _lowerCamelCase : Optional[str] = None ): _snake_case = self._tokenizer.model.save(_lowerCamelCase , name=_lowerCamelCase ) return tuple(_lowerCamelCase )
288
"""simple docstring""" # Logistic Regression from scratch # In[62]: # In[63]: # importing all the required libraries import numpy as np from matplotlib import pyplot as plt from sklearn import datasets def _UpperCAmelCase ( __lowerCamelCase : str ) -> List[Any]: return 1 / (1 + np.exp(-z )) def _UpperCAmelCase ( __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[str] ) -> Optional[Any]: return (-y * np.log(__lowerCamelCase ) - (1 - y) * np.log(1 - h )).mean() def _UpperCAmelCase ( __lowerCamelCase : Dict , __lowerCamelCase : Dict , __lowerCamelCase : Dict ) -> List[str]: _snake_case = np.dot(__lowerCamelCase , __lowerCamelCase ) return np.sum(y * scores - np.log(1 + np.exp(__lowerCamelCase ) ) ) def _UpperCAmelCase ( __lowerCamelCase : List[str] , __lowerCamelCase : List[str] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : str=7_00_00 ) -> Optional[Any]: _snake_case = np.zeros(x.shape[1] ) for iterations in range(__lowerCamelCase ): _snake_case = np.dot(__lowerCamelCase , __lowerCamelCase ) _snake_case = sigmoid_function(__lowerCamelCase ) _snake_case = np.dot(x.T , h - y ) / y.size _snake_case = theta - alpha * gradient # updating the weights _snake_case = np.dot(__lowerCamelCase , __lowerCamelCase ) _snake_case = sigmoid_function(__lowerCamelCase ) _snake_case = cost_function(__lowerCamelCase , __lowerCamelCase ) if iterations % 1_00 == 0: print(f'''loss: {j} \t''' ) # printing the loss after every 100 iterations return theta # In[68]: if __name__ == "__main__": UpperCAmelCase__ = datasets.load_iris() UpperCAmelCase__ = iris.data[:, :2] UpperCAmelCase__ = (iris.target != 0) * 1 UpperCAmelCase__ = 0.1 UpperCAmelCase__ = logistic_reg(alpha, x, y, max_iterations=70000) print('theta: ', theta) # printing the theta i.e our weights vector def _UpperCAmelCase ( __lowerCamelCase : Tuple ) -> Union[str, Any]: return sigmoid_function( np.dot(__lowerCamelCase , __lowerCamelCase ) ) # predicting the value of probability from the logistic regression 
algorithm plt.figure(figsize=(10, 6)) plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color='b', label='0') plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color='r', label='1') ((UpperCAmelCase__) , (UpperCAmelCase__)) = (x[:, 0].min(), x[:, 0].max()) ((UpperCAmelCase__) , (UpperCAmelCase__)) = (x[:, 1].min(), x[:, 1].max()) ((UpperCAmelCase__) , (UpperCAmelCase__)) = np.meshgrid(np.linspace(xa_min, xa_max), np.linspace(xa_min, xa_max)) UpperCAmelCase__ = np.c_[xxa.ravel(), xxa.ravel()] UpperCAmelCase__ = predict_prob(grid).reshape(xxa.shape) plt.contour(xxa, xxa, probs, [0.5], linewidths=1, colors='black') plt.legend() plt.show()
288
1
"""simple docstring""" from __future__ import annotations import inspect import unittest import numpy as np from transformers import ResNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFResNetForImageClassification, TFResNetModel from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class lowerCAmelCase__ : def __init__( self : Dict , _lowerCamelCase : int , _lowerCamelCase : Optional[int]=3 , _lowerCamelCase : List[str]=32 , _lowerCamelCase : Optional[int]=3 , _lowerCamelCase : Dict=10 , _lowerCamelCase : Tuple=[10, 20, 30, 40] , _lowerCamelCase : int=[1, 1, 2, 1] , _lowerCamelCase : int=True , _lowerCamelCase : Optional[int]=True , _lowerCamelCase : Optional[int]="relu" , _lowerCamelCase : List[Any]=3 , _lowerCamelCase : Dict=None , ): _snake_case = parent _snake_case = batch_size _snake_case = image_size _snake_case = num_channels _snake_case = embeddings_size _snake_case = hidden_sizes _snake_case = depths _snake_case = is_training _snake_case = use_labels _snake_case = hidden_act _snake_case = num_labels _snake_case = scope _snake_case = len(_lowerCamelCase ) def lowercase ( self : Optional[int] ): _snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _snake_case = None if self.use_labels: _snake_case = ids_tensor([self.batch_size] , self.num_labels ) _snake_case = self.get_config() return config, pixel_values, labels def lowercase ( self : Tuple ): return ResNetConfig( num_channels=self.num_channels , 
embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , ) def lowercase ( self : List[Any] , _lowerCamelCase : List[str] , _lowerCamelCase : str , _lowerCamelCase : List[Any] ): _snake_case = TFResNetModel(config=_lowerCamelCase ) _snake_case = model(_lowerCamelCase ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def lowercase ( self : Dict , _lowerCamelCase : str , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Tuple ): _snake_case = self.num_labels _snake_case = TFResNetForImageClassification(_lowerCamelCase ) _snake_case = model(_lowerCamelCase , labels=_lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowercase ( self : Tuple ): _snake_case = self.prepare_config_and_inputs() _snake_case , _snake_case , _snake_case = config_and_inputs _snake_case = {'''pixel_values''': pixel_values} return config, inputs_dict @require_tf class lowerCAmelCase__ ( A_ , A_ , unittest.TestCase ): __a = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else () __a = ( {"""feature-extraction""": TFResNetModel, """image-classification""": TFResNetForImageClassification} if is_tf_available() else {} ) __a = False __a = False __a = False __a = False __a = False def lowercase ( self : List[Any] ): _snake_case = TFResNetModelTester(self ) _snake_case = ConfigTester(self , config_class=_lowerCamelCase , has_text_modality=_lowerCamelCase ) def lowercase ( self : Tuple ): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() 
self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def lowercase ( self : List[Any] ): return @unittest.skip(reason='''ResNet does not use inputs_embeds''' ) def lowercase ( self : Any ): pass @unittest.skip(reason='''ResNet does not support input and output embeddings''' ) def lowercase ( self : List[str] ): pass def lowercase ( self : int ): _snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _snake_case = model_class(_lowerCamelCase ) _snake_case = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _snake_case = [*signature.parameters.keys()] _snake_case = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , _lowerCamelCase ) def lowercase ( self : List[str] ): _snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowerCamelCase ) def lowercase ( self : Union[str, Any] ): def check_hidden_states_output(_lowerCamelCase : int , _lowerCamelCase : List[Any] , _lowerCamelCase : str ): _snake_case = model_class(_lowerCamelCase ) _snake_case = model(**self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) ) _snake_case = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states _snake_case = self.model_tester.num_stages self.assertEqual(len(_lowerCamelCase ) , expected_num_stages + 1 ) # ResNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) _snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common() _snake_case = ['''basic''', '''bottleneck'''] for model_class in self.all_model_classes: for layer_type in layers_type: _snake_case = layer_type _snake_case = 
True check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _snake_case = True check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) def lowercase ( self : Union[str, Any] ): _snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_lowerCamelCase ) @slow def lowercase ( self : List[str] ): for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _snake_case = TFResNetModel.from_pretrained(_lowerCamelCase ) self.assertIsNotNone(_lowerCamelCase ) def _UpperCAmelCase ( ) -> Union[str, Any]: _snake_case = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_tf @require_vision class lowerCAmelCase__ ( unittest.TestCase ): @cached_property def lowercase ( self : Dict ): return ( AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def lowercase ( self : List[Any] ): _snake_case = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) _snake_case = self.default_image_processor _snake_case = prepare_img() _snake_case = image_processor(images=_lowerCamelCase , return_tensors='''tf''' ) # forward pass _snake_case = model(**_lowerCamelCase ) # verify the logits _snake_case = tf.TensorShape((1, 1000) ) self.assertEqual(outputs.logits.shape , _lowerCamelCase ) _snake_case = tf.constant([-1_1.1_0_6_9, -9.7_8_7_7, -8.3_7_7_7] ) self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , _lowerCamelCase , atol=1e-4 ) )
288
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging UpperCAmelCase__ = logging.get_logger(__name__) UpperCAmelCase__ = {'vocab_file': 'sentencepiece.model'} UpperCAmelCase__ = { 'vocab_file': { 'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/sentencepiece.model', }, } UpperCAmelCase__ = { 'google/rembert': 256, } class lowerCAmelCase__ ( A_ ): __a = VOCAB_FILES_NAMES __a = PRETRAINED_VOCAB_FILES_MAP __a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self : Union[str, Any] , _lowerCamelCase : Any , _lowerCamelCase : Union[str, Any]=False , _lowerCamelCase : Any=True , _lowerCamelCase : Optional[Any]=True , _lowerCamelCase : int="[CLS]" , _lowerCamelCase : Optional[int]="[SEP]" , _lowerCamelCase : Optional[int]="[UNK]" , _lowerCamelCase : Optional[Any]="[SEP]" , _lowerCamelCase : str="[PAD]" , _lowerCamelCase : List[Any]="[CLS]" , _lowerCamelCase : Any="[MASK]" , **_lowerCamelCase : Optional[int] , ): super().__init__( do_lower_case=_lowerCamelCase , remove_space=_lowerCamelCase , keep_accents=_lowerCamelCase , bos_token=_lowerCamelCase , eos_token=_lowerCamelCase , unk_token=_lowerCamelCase , sep_token=_lowerCamelCase , pad_token=_lowerCamelCase , cls_token=_lowerCamelCase , mask_token=_lowerCamelCase , **_lowerCamelCase , ) _snake_case = do_lower_case _snake_case = remove_space _snake_case = keep_accents _snake_case = vocab_file _snake_case = spm.SentencePieceProcessor() self.sp_model.Load(_lowerCamelCase ) @property def lowercase ( self : int ): return len(self.sp_model ) def lowercase ( self : Any ): _snake_case = {self.convert_ids_to_tokens(_lowerCamelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : List[str] ): _snake_case = self.__dict__.copy() _snake_case = None return state def __setstate__( self : 
List[str] , _lowerCamelCase : Tuple ): _snake_case = d _snake_case = spm.SentencePieceProcessor() self.sp_model.Load(self.vocab_file ) def lowercase ( self : str , _lowerCamelCase : List[str] , _lowerCamelCase : Tuple=False ): _snake_case = self.sp_model.EncodeAsPieces(_lowerCamelCase ) return pieces def lowercase ( self : str , _lowerCamelCase : str ): return self.sp_model.PieceToId(_lowerCamelCase ) def lowercase ( self : List[str] , _lowerCamelCase : int ): return self.sp_model.IdToPiece(_lowerCamelCase ) def lowercase ( self : Union[str, Any] , _lowerCamelCase : Any ): _snake_case = self.sp_model.decode_pieces(_lowerCamelCase ) return out_string def lowercase ( self : Optional[Any] , _lowerCamelCase : List[int] , _lowerCamelCase : Optional[List[int]] = None ): _snake_case = [self.sep_token_id] _snake_case = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def lowercase ( self : Tuple , _lowerCamelCase : List[int] , _lowerCamelCase : Optional[List[int]] = None , _lowerCamelCase : bool = False ): if already_has_special_tokens: if token_ids_a is not None: raise ValueError( '''You should not supply a second sequence if the provided sequence of ''' '''ids is already formatted with special tokens for the model.''' ) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is not None: return [1] + ([0] * len(_lowerCamelCase )) + [1] + ([0] * len(_lowerCamelCase )) + [1] return [1] + ([0] * len(_lowerCamelCase )) + [1] def lowercase ( self : Optional[int] , _lowerCamelCase : List[int] , _lowerCamelCase : Optional[List[int]] = None ): _snake_case = [self.sep_token_id] _snake_case = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def lowercase ( self : List[str] , _lowerCamelCase : str , _lowerCamelCase : Optional[str] = None ): if not 
os.path.isdir(_lowerCamelCase ): logger.error('''Vocabulary path ({}) should be a directory'''.format(_lowerCamelCase ) ) return _snake_case = os.path.join( _lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCamelCase ): copyfile(self.vocab_file , _lowerCamelCase ) return (out_vocab_file,)
288
1
"""simple docstring""" import math import qiskit def _UpperCAmelCase ( __lowerCamelCase : int = 1 , __lowerCamelCase : int = 1 , __lowerCamelCase : int = 1 ) -> qiskit.result.counts.Counts: if ( isinstance(__lowerCamelCase , __lowerCamelCase ) or isinstance(__lowerCamelCase , __lowerCamelCase ) or isinstance(__lowerCamelCase , __lowerCamelCase ) ): raise TypeError('''inputs must be integers.''' ) if (input_a < 0) or (input_a < 0) or (carry_in < 0): raise ValueError('''inputs must be positive.''' ) if ( (math.floor(__lowerCamelCase ) != input_a) or (math.floor(__lowerCamelCase ) != input_a) or (math.floor(__lowerCamelCase ) != carry_in) ): raise ValueError('''inputs must be exact integers.''' ) if (input_a > 2) or (input_a > 2) or (carry_in > 2): raise ValueError('''inputs must be less or equal to 2.''' ) # build registers _snake_case = qiskit.QuantumRegister(4 , '''qr''' ) _snake_case = qiskit.ClassicalRegister(2 , '''cr''' ) # list the entries _snake_case = [input_a, input_a, carry_in] _snake_case = qiskit.QuantumCircuit(__lowerCamelCase , __lowerCamelCase ) for i in range(0 , 3 ): if entry[i] == 2: quantum_circuit.h(__lowerCamelCase ) # for hadamard entries elif entry[i] == 1: quantum_circuit.x(__lowerCamelCase ) # for 1 entries elif entry[i] == 0: quantum_circuit.i(__lowerCamelCase ) # for 0 entries # build the circuit quantum_circuit.ccx(0 , 1 , 3 ) # ccx = toffoli gate quantum_circuit.cx(0 , 1 ) quantum_circuit.ccx(1 , 2 , 3 ) quantum_circuit.cx(1 , 2 ) quantum_circuit.cx(0 , 1 ) quantum_circuit.measure([2, 3] , __lowerCamelCase ) # measure the last two qbits _snake_case = qiskit.Aer.get_backend('''aer_simulator''' ) _snake_case = qiskit.execute(__lowerCamelCase , __lowerCamelCase , shots=10_00 ) return job.result().get_counts(__lowerCamelCase ) if __name__ == "__main__": print(F"Total sum count for state is: {quantum_full_adder(1, 1, 1)}")
288
"""simple docstring""" from math import pow def _UpperCAmelCase ( __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : int , ) -> tuple[int, int]: if current_sum == needed_sum: # If the sum of the powers is equal to needed_sum, then we have a solution. solutions_count += 1 return current_sum, solutions_count _snake_case = int(pow(__lowerCamelCase , __lowerCamelCase ) ) if current_sum + i_to_n <= needed_sum: # If the sum of the powers is less than needed_sum, then continue adding powers. current_sum += i_to_n _snake_case , _snake_case = backtrack( __lowerCamelCase , __lowerCamelCase , current_number + 1 , __lowerCamelCase , __lowerCamelCase ) current_sum -= i_to_n if i_to_n < needed_sum: # If the power of i is less than needed_sum, then try with the next power. _snake_case , _snake_case = backtrack( __lowerCamelCase , __lowerCamelCase , current_number + 1 , __lowerCamelCase , __lowerCamelCase ) return current_sum, solutions_count def _UpperCAmelCase ( __lowerCamelCase : int , __lowerCamelCase : int ) -> int: if not (1 <= needed_sum <= 10_00 and 2 <= power <= 10): raise ValueError( '''Invalid input\n''' '''needed_sum must be between 1 and 1000, power between 2 and 10.''' ) return backtrack(__lowerCamelCase , __lowerCamelCase , 1 , 0 , 0 )[1] # Return the solutions_count if __name__ == "__main__": import doctest doctest.testmod()
288
1
"""simple docstring""" from __future__ import annotations def _UpperCAmelCase ( __lowerCamelCase : int | str ) -> bool: _snake_case = str(__lowerCamelCase ) return n == n[::-1] def _UpperCAmelCase ( __lowerCamelCase : int = 1_00_00_00 ) -> Any: _snake_case = 0 for i in range(1 , __lowerCamelCase ): if is_palindrome(__lowerCamelCase ) and is_palindrome(bin(__lowerCamelCase ).split('''b''' )[1] ): total += i return total if __name__ == "__main__": print(solution(int(str(input().strip()))))
288
"""simple docstring""" import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import CLIPImageProcessor, CLIPProcessor @require_vision class lowerCAmelCase__ ( unittest.TestCase ): def lowercase ( self : Any ): _snake_case = tempfile.mkdtemp() # fmt: off _snake_case = ['''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>'''] # fmt: on _snake_case = dict(zip(_lowerCamelCase , range(len(_lowerCamelCase ) ) ) ) _snake_case = ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''', ''''''] _snake_case = {'''unk_token''': '''<unk>'''} _snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) _snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(_lowerCamelCase ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(_lowerCamelCase ) ) _snake_case = { '''do_resize''': True, '''size''': 20, '''do_center_crop''': True, '''crop_size''': 18, '''do_normalize''': True, '''image_mean''': [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3], '''image_std''': [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1], } _snake_case = os.path.join(self.tmpdirname , _lowerCamelCase ) with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp: json.dump(_lowerCamelCase , 
_lowerCamelCase ) def lowercase ( self : Tuple , **_lowerCamelCase : Any ): return CLIPTokenizer.from_pretrained(self.tmpdirname , **_lowerCamelCase ) def lowercase ( self : str , **_lowerCamelCase : Any ): return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **_lowerCamelCase ) def lowercase ( self : int , **_lowerCamelCase : Optional[int] ): return CLIPImageProcessor.from_pretrained(self.tmpdirname , **_lowerCamelCase ) def lowercase ( self : Union[str, Any] ): shutil.rmtree(self.tmpdirname ) def lowercase ( self : Any ): _snake_case = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] _snake_case = [Image.fromarray(np.moveaxis(_lowerCamelCase , 0 , -1 ) ) for x in image_inputs] return image_inputs def lowercase ( self : Optional[Any] ): _snake_case = self.get_tokenizer() _snake_case = self.get_rust_tokenizer() _snake_case = self.get_image_processor() _snake_case = CLIPProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase ) processor_slow.save_pretrained(self.tmpdirname ) _snake_case = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=_lowerCamelCase ) _snake_case = CLIPProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase ) processor_fast.save_pretrained(self.tmpdirname ) _snake_case = CLIPProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , _lowerCamelCase ) self.assertIsInstance(processor_fast.tokenizer , _lowerCamelCase ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , _lowerCamelCase ) 
self.assertIsInstance(processor_fast.image_processor , _lowerCamelCase ) def lowercase ( self : List[Any] ): _snake_case = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) _snake_case = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' ) _snake_case = self.get_image_processor(do_normalize=_lowerCamelCase , padding_value=1.0 ) _snake_case = CLIPProcessor.from_pretrained( self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_lowerCamelCase , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , _lowerCamelCase ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , _lowerCamelCase ) def lowercase ( self : int ): _snake_case = self.get_image_processor() _snake_case = self.get_tokenizer() _snake_case = CLIPProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase ) _snake_case = self.prepare_image_inputs() _snake_case = image_processor(_lowerCamelCase , return_tensors='''np''' ) _snake_case = processor(images=_lowerCamelCase , return_tensors='''np''' ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 ) def lowercase ( self : Any ): _snake_case = self.get_image_processor() _snake_case = self.get_tokenizer() _snake_case = CLIPProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase ) _snake_case = '''lower newer''' _snake_case = processor(text=_lowerCamelCase ) _snake_case = tokenizer(_lowerCamelCase ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def lowercase ( self : Any ): _snake_case = self.get_image_processor() _snake_case = self.get_tokenizer() _snake_case = 
CLIPProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase ) _snake_case = '''lower newer''' _snake_case = self.prepare_image_inputs() _snake_case = processor(text=_lowerCamelCase , images=_lowerCamelCase ) self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''pixel_values'''] ) # test if it raises when no input is passed with pytest.raises(_lowerCamelCase ): processor() def lowercase ( self : List[str] ): _snake_case = self.get_image_processor() _snake_case = self.get_tokenizer() _snake_case = CLIPProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase ) _snake_case = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] _snake_case = processor.batch_decode(_lowerCamelCase ) _snake_case = tokenizer.batch_decode(_lowerCamelCase ) self.assertListEqual(_lowerCamelCase , _lowerCamelCase ) def lowercase ( self : List[Any] ): _snake_case = self.get_image_processor() _snake_case = self.get_tokenizer() _snake_case = CLIPProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase ) _snake_case = '''lower newer''' _snake_case = self.prepare_image_inputs() _snake_case = processor(text=_lowerCamelCase , images=_lowerCamelCase ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
288
1
"""simple docstring""" import gc import random import unittest import numpy as np import torch from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline from diffusers.pipelines.shap_e import ShapERenderer from diffusers.utils import floats_tensor, load_image, load_numpy, slow from diffusers.utils.testing_utils import require_torch_gpu, torch_device from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference class lowerCAmelCase__ ( A_ , unittest.TestCase ): __a = ShapEImgaImgPipeline __a = ["""image"""] __a = ["""image"""] __a = [ """num_images_per_prompt""", """num_inference_steps""", """generator""", """latents""", """guidance_scale""", """frame_size""", """output_type""", """return_dict""", ] __a = False @property def lowercase ( self : str ): return 32 @property def lowercase ( self : Optional[int] ): return 32 @property def lowercase ( self : Union[str, Any] ): return self.time_input_dim * 4 @property def lowercase ( self : Tuple ): return 8 @property def lowercase ( self : Optional[Any] ): torch.manual_seed(0 ) _snake_case = CLIPVisionConfig( hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , ) _snake_case = CLIPVisionModel(_lowerCamelCase ) return model @property def lowercase ( self : List[str] ): _snake_case = CLIPImageProcessor( crop_size=224 , do_center_crop=_lowerCamelCase , do_normalize=_lowerCamelCase , do_resize=_lowerCamelCase , image_mean=[0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3] , image_std=[0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1] , resample=3 , size=224 , ) return image_processor @property def lowercase ( self : List[str] ): torch.manual_seed(0 ) _snake_case = { '''num_attention_heads''': 2, '''attention_head_dim''': 16, 
'''embedding_dim''': self.time_input_dim, '''num_embeddings''': 32, '''embedding_proj_dim''': self.text_embedder_hidden_size, '''time_embed_dim''': self.time_embed_dim, '''num_layers''': 1, '''clip_embed_dim''': self.time_input_dim * 2, '''additional_embeddings''': 0, '''time_embed_act_fn''': '''gelu''', '''norm_in_type''': '''layer''', '''embedding_proj_norm_type''': '''layer''', '''encoder_hid_proj_type''': None, '''added_emb_type''': None, } _snake_case = PriorTransformer(**_lowerCamelCase ) return model @property def lowercase ( self : Optional[Any] ): torch.manual_seed(0 ) _snake_case = { '''param_shapes''': ( (self.renderer_dim, 93), (self.renderer_dim, 8), (self.renderer_dim, 8), (self.renderer_dim, 8), ), '''d_latent''': self.time_input_dim, '''d_hidden''': self.renderer_dim, '''n_output''': 12, '''background''': ( 0.1, 0.1, 0.1, ), } _snake_case = ShapERenderer(**_lowerCamelCase ) return model def lowercase ( self : Dict ): _snake_case = self.dummy_prior _snake_case = self.dummy_image_encoder _snake_case = self.dummy_image_processor _snake_case = self.dummy_renderer _snake_case = HeunDiscreteScheduler( beta_schedule='''exp''' , num_train_timesteps=1024 , prediction_type='''sample''' , use_karras_sigmas=_lowerCamelCase , clip_sample=_lowerCamelCase , clip_sample_range=1.0 , ) _snake_case = { '''prior''': prior, '''image_encoder''': image_encoder, '''image_processor''': image_processor, '''renderer''': renderer, '''scheduler''': scheduler, } return components def lowercase ( self : Any , _lowerCamelCase : Tuple , _lowerCamelCase : Union[str, Any]=0 ): _snake_case = floats_tensor((1, 3, 64, 64) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase ) if str(_lowerCamelCase ).startswith('''mps''' ): _snake_case = torch.manual_seed(_lowerCamelCase ) else: _snake_case = torch.Generator(device=_lowerCamelCase ).manual_seed(_lowerCamelCase ) _snake_case = { '''image''': input_image, '''generator''': generator, '''num_inference_steps''': 1, '''frame_size''': 
32, '''output_type''': '''np''', } return inputs def lowercase ( self : Union[str, Any] ): _snake_case = '''cpu''' _snake_case = self.get_dummy_components() _snake_case = self.pipeline_class(**_lowerCamelCase ) _snake_case = pipe.to(_lowerCamelCase ) pipe.set_progress_bar_config(disable=_lowerCamelCase ) _snake_case = pipe(**self.get_dummy_inputs(_lowerCamelCase ) ) _snake_case = output.images[0] _snake_case = image[0, -3:, -3:, -1] assert image.shape == (20, 32, 32, 3) _snake_case = np.array( [ 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, ] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def lowercase ( self : Tuple ): # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def lowercase ( self : str ): _snake_case = torch_device == '''cpu''' _snake_case = True self._test_inference_batch_single_identical( batch_size=2 , test_max_difference=_lowerCamelCase , relax_max_difference=_lowerCamelCase , ) def lowercase ( self : List[Any] ): _snake_case = self.get_dummy_components() _snake_case = self.pipeline_class(**_lowerCamelCase ) _snake_case = pipe.to(_lowerCamelCase ) pipe.set_progress_bar_config(disable=_lowerCamelCase ) _snake_case = 1 _snake_case = 2 _snake_case = self.get_dummy_inputs(_lowerCamelCase ) for key in inputs.keys(): if key in self.batch_params: _snake_case = batch_size * [inputs[key]] _snake_case = pipe(**_lowerCamelCase , num_images_per_prompt=_lowerCamelCase )[0] assert images.shape[0] == batch_size * num_images_per_prompt @slow @require_torch_gpu class lowerCAmelCase__ ( unittest.TestCase ): def lowercase ( self : Union[str, Any] ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def lowercase ( self : List[Any] ): _snake_case = load_image( 
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/corgi.png''' ) _snake_case = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/test_shap_e_img2img_out.npy''' ) _snake_case = ShapEImgaImgPipeline.from_pretrained('''openai/shap-e-img2img''' ) _snake_case = pipe.to(_lowerCamelCase ) pipe.set_progress_bar_config(disable=_lowerCamelCase ) _snake_case = torch.Generator(device=_lowerCamelCase ).manual_seed(0 ) _snake_case = pipe( _lowerCamelCase , generator=_lowerCamelCase , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type='''np''' , ).images[0] assert images.shape == (20, 64, 64, 3) assert_mean_pixel_difference(_lowerCamelCase , _lowerCamelCase )
288
"""simple docstring""" import os import time import numpy as np import onnxruntime as ort UpperCAmelCase__ = '1' UpperCAmelCase__ = '0' UpperCAmelCase__ = '1' UpperCAmelCase__ = ort.SessionOptions() UpperCAmelCase__ = ort.GraphOptimizationLevel.ORT_DISABLE_ALL print('Create inference session...') UpperCAmelCase__ = ['TensorrtExecutionProvider', 'CUDAExecutionProvider'] UpperCAmelCase__ = ort.InferenceSession('model.onnx', sess_options=sess_opt, providers=execution_provider) UpperCAmelCase__ = ort.RunOptions() UpperCAmelCase__ = 128 UpperCAmelCase__ = 1 UpperCAmelCase__ = np.ones((batch, sequence), dtype=np.intaa) UpperCAmelCase__ = np.ones((batch, sequence), dtype=np.intaa) UpperCAmelCase__ = np.ones((batch, sequence), dtype=np.intaa) print('Warm up phase...') sess.run( None, { sess.get_inputs()[0].name: input_ids, sess.get_inputs()[1].name: attention_mask, sess.get_inputs()[2].name: token_type_ids, }, run_options=run_opt, ) print('Start inference...') UpperCAmelCase__ = time.time() UpperCAmelCase__ = 2000 UpperCAmelCase__ = {} for iter in range(max_iters): UpperCAmelCase__ = sess.run( None, { sess.get_inputs()[0].name: input_ids, sess.get_inputs()[1].name: attention_mask, sess.get_inputs()[2].name: token_type_ids, }, run_options=run_opt, ) print('Average Inference Time = {:.3f} ms'.format((time.time() - start_time) * 1000 / max_iters))
288
1
"""simple docstring""" import os import posixpath import uuid from dataclasses import dataclass from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union import numpy as np import pyarrow as pa import datasets from datasets.arrow_writer import ArrowWriter, ParquetWriter from datasets.config import MAX_SHARD_SIZE from datasets.filesystems import ( is_remote_filesystem, rename, ) from datasets.iterable_dataset import _BaseExamplesIterable from datasets.utils.py_utils import convert_file_size_to_int UpperCAmelCase__ = datasets.utils.logging.get_logger(__name__) if TYPE_CHECKING: import pyspark @dataclass class lowerCAmelCase__ ( datasets.BuilderConfig ): __a = None def _UpperCAmelCase ( __lowerCamelCase : "pyspark.sql.DataFrame" , __lowerCamelCase : List[int] , ) -> Optional[int]: import pyspark def generate_fn(): _snake_case = df.select('''*''' , pyspark.sql.functions.spark_partition_id().alias('''part_id''' ) ) for partition_id in partition_order: _snake_case = df_with_partition_id.select('''*''' ).where(f'''part_id = {partition_id}''' ).drop('''part_id''' ) _snake_case = partition_df.collect() _snake_case = 0 for row in rows: yield f'''{partition_id}_{row_id}''', row.asDict() row_id += 1 return generate_fn class lowerCAmelCase__ ( _BaseExamplesIterable ): def __init__( self : Optional[int] , _lowerCamelCase : "pyspark.sql.DataFrame" , _lowerCamelCase : List[Any]=None , ): _snake_case = df _snake_case = partition_order or range(self.df.rdd.getNumPartitions() ) _snake_case = _generate_iterable_examples(self.df , self.partition_order ) def __iter__( self : Optional[int] ): yield from self.generate_examples_fn() def lowercase ( self : Any , _lowerCamelCase : np.random.Generator ): _snake_case = list(range(self.df.rdd.getNumPartitions() ) ) generator.shuffle(_lowerCamelCase ) return SparkExamplesIterable(self.df , partition_order=_lowerCamelCase ) def lowercase ( self : List[Any] , _lowerCamelCase : int , _lowerCamelCase : int ): _snake_case = 
self.split_shard_indices_by_worker(_lowerCamelCase , _lowerCamelCase ) return SparkExamplesIterable(self.df , partition_order=_lowerCamelCase ) @property def lowercase ( self : List[str] ): return len(self.partition_order ) class lowerCAmelCase__ ( datasets.DatasetBuilder ): __a = SparkConfig def __init__( self : str , _lowerCamelCase : "pyspark.sql.DataFrame" , _lowerCamelCase : str = None , _lowerCamelCase : str = None , **_lowerCamelCase : List[str] , ): import pyspark _snake_case = pyspark.sql.SparkSession.builder.getOrCreate() _snake_case = df _snake_case = working_dir super().__init__( cache_dir=_lowerCamelCase , config_name=str(self.df.semanticHash() ) , **_lowerCamelCase , ) def lowercase ( self : str ): # Returns the path of the created file. def create_cache_and_write_probe(_lowerCamelCase : List[str] ): # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories # already exist. os.makedirs(self._cache_dir , exist_ok=_lowerCamelCase ) _snake_case = os.path.join(self._cache_dir , '''fs_test''' + uuid.uuida().hex ) # Opening the file in append mode will create a new file unless it already exists, in which case it will not # change the file contents. open(_lowerCamelCase , '''a''' ) return [probe_file] if self._spark.conf.get('''spark.master''' , '''''' ).startswith('''local''' ): return # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS # accessible to the driver. # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error. 
if self._cache_dir: _snake_case = ( self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(_lowerCamelCase ).collect() ) if os.path.isfile(probe[0] ): return raise ValueError( '''When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir''' ) def lowercase ( self : Dict ): return datasets.DatasetInfo(features=self.config.features ) def lowercase ( self : Union[str, Any] , _lowerCamelCase : datasets.download.download_manager.DownloadManager ): return [datasets.SplitGenerator(name=datasets.Split.TRAIN )] def lowercase ( self : Dict , _lowerCamelCase : List[Any] ): import pyspark def get_arrow_batch_size(_lowerCamelCase : List[Any] ): for batch in it: yield pa.RecordBatch.from_pydict({'''batch_bytes''': [batch.nbytes]} ) _snake_case = self.df.count() _snake_case = df_num_rows if df_num_rows <= 100 else 100 # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample. _snake_case = ( self.df.limit(_lowerCamelCase ) .repartition(1 ) .mapInArrow(_lowerCamelCase , '''batch_bytes: long''' ) .agg(pyspark.sql.functions.sum('''batch_bytes''' ).alias('''sample_bytes''' ) ) .collect()[0] .sample_bytes / sample_num_rows ) _snake_case = approx_bytes_per_row * df_num_rows if approx_total_size > max_shard_size: # Make sure there is at least one row per partition. _snake_case = min(_lowerCamelCase , int(approx_total_size / max_shard_size ) ) _snake_case = self.df.repartition(_lowerCamelCase ) def lowercase ( self : Dict , _lowerCamelCase : str , _lowerCamelCase : str , _lowerCamelCase : int , ): import pyspark _snake_case = ParquetWriter if file_format == '''parquet''' else ArrowWriter _snake_case = os.path.join(self._working_dir , os.path.basename(_lowerCamelCase ) ) if self._working_dir else fpath _snake_case = file_format == '''parquet''' # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to # pickling the SparkContext. 
_snake_case = self.config.features _snake_case = self._writer_batch_size _snake_case = self._fs.storage_options def write_arrow(_lowerCamelCase : Tuple ): # Within the same SparkContext, no two task attempts will share the same attempt ID. _snake_case = pyspark.TaskContext().taskAttemptId() _snake_case = next(_lowerCamelCase , _lowerCamelCase ) if first_batch is None: # Some partitions might not receive any data. return pa.RecordBatch.from_arrays( [[task_id], [0], [0]] , names=['''task_id''', '''num_examples''', '''num_bytes'''] , ) _snake_case = 0 _snake_case = writer_class( features=_lowerCamelCase , path=working_fpath.replace('''SSSSS''' , f'''{shard_id:05d}''' ).replace('''TTTTT''' , f'''{task_id:05d}''' ) , writer_batch_size=_lowerCamelCase , storage_options=_lowerCamelCase , embed_local_files=_lowerCamelCase , ) _snake_case = pa.Table.from_batches([first_batch] ) writer.write_table(_lowerCamelCase ) for batch in it: if max_shard_size is not None and writer._num_bytes >= max_shard_size: _snake_case , _snake_case = writer.finalize() writer.close() yield pa.RecordBatch.from_arrays( [[task_id], [num_examples], [num_bytes]] , names=['''task_id''', '''num_examples''', '''num_bytes'''] , ) shard_id += 1 _snake_case = writer_class( features=writer._features , path=working_fpath.replace('''SSSSS''' , f'''{shard_id:05d}''' ).replace('''TTTTT''' , f'''{task_id:05d}''' ) , writer_batch_size=_lowerCamelCase , storage_options=_lowerCamelCase , embed_local_files=_lowerCamelCase , ) _snake_case = pa.Table.from_batches([batch] ) writer.write_table(_lowerCamelCase ) if writer._num_bytes > 0: _snake_case , _snake_case = writer.finalize() writer.close() yield pa.RecordBatch.from_arrays( [[task_id], [num_examples], [num_bytes]] , names=['''task_id''', '''num_examples''', '''num_bytes'''] , ) if working_fpath != fpath: for file in os.listdir(os.path.dirname(_lowerCamelCase ) ): _snake_case = os.path.join(os.path.dirname(_lowerCamelCase ) , os.path.basename(_lowerCamelCase ) ) 
shutil.move(_lowerCamelCase , _lowerCamelCase ) _snake_case = ( self.df.mapInArrow(_lowerCamelCase , '''task_id: long, num_examples: long, num_bytes: long''' ) .groupBy('''task_id''' ) .agg( pyspark.sql.functions.sum('''num_examples''' ).alias('''total_num_examples''' ) , pyspark.sql.functions.sum('''num_bytes''' ).alias('''total_num_bytes''' ) , pyspark.sql.functions.count('''num_bytes''' ).alias('''num_shards''' ) , pyspark.sql.functions.collect_list('''num_examples''' ).alias('''shard_lengths''' ) , ) .collect() ) for row in stats: yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths) def lowercase ( self : int , _lowerCamelCase : "datasets.SplitGenerator" , _lowerCamelCase : str = "arrow" , _lowerCamelCase : Optional[Union[str, int]] = None , _lowerCamelCase : Optional[int] = None , **_lowerCamelCase : List[Any] , ): self._validate_cache_dir() _snake_case = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE ) self._repartition_df_if_needed(_lowerCamelCase ) _snake_case = not is_remote_filesystem(self._fs ) _snake_case = os.path.join if is_local else posixpath.join _snake_case = '''-TTTTT-SSSSS-of-NNNNN''' _snake_case = f'''{self.name}-{split_generator.name}{SUFFIX}.{file_format}''' _snake_case = path_join(self._output_dir , _lowerCamelCase ) _snake_case = 0 _snake_case = 0 _snake_case = 0 _snake_case = [] _snake_case = [] for task_id, content in self._prepare_split_single(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): ( ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ) = content if num_bytes > 0: total_num_examples += num_examples total_num_bytes += num_bytes total_shards += num_shards task_id_and_num_shards.append((task_id, num_shards) ) all_shard_lengths.extend(_lowerCamelCase ) _snake_case = total_num_examples _snake_case = total_num_bytes # should rename everything at the end logger.debug(f'''Renaming {total_shards} shards.''' ) if total_shards > 1: _snake_case = 
all_shard_lengths # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a # pickling error due to pickling the SparkContext. _snake_case = self._fs # use the -SSSSS-of-NNNNN pattern def _rename_shard( _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : int , ): rename( _lowerCamelCase , fpath.replace('''SSSSS''' , f'''{shard_id:05d}''' ).replace('''TTTTT''' , f'''{task_id:05d}''' ) , fpath.replace('''TTTTT-SSSSS''' , f'''{global_shard_id:05d}''' ).replace('''NNNNN''' , f'''{total_shards:05d}''' ) , ) _snake_case = [] _snake_case = 0 for i in range(len(_lowerCamelCase ) ): _snake_case , _snake_case = task_id_and_num_shards[i] for shard_id in range(_lowerCamelCase ): args.append([task_id, shard_id, global_shard_id] ) global_shard_id += 1 self._spark.sparkContext.parallelize(_lowerCamelCase , len(_lowerCamelCase ) ).map(lambda _lowerCamelCase : _rename_shard(*_lowerCamelCase ) ).collect() else: # don't use any pattern _snake_case = 0 _snake_case = task_id_and_num_shards[0][0] self._rename( fpath.replace('''SSSSS''' , f'''{shard_id:05d}''' ).replace('''TTTTT''' , f'''{task_id:05d}''' ) , fpath.replace(_lowerCamelCase , '''''' ) , ) def lowercase ( self : List[str] , _lowerCamelCase : "datasets.SplitGenerator" , ): return SparkExamplesIterable(self.df )
288
"""simple docstring""" import logging from transformers.configuration_utils import PretrainedConfig UpperCAmelCase__ = logging.getLogger(__name__) class lowerCAmelCase__ ( A_ ): __a = """masked_bert""" def __init__( self : Union[str, Any] , _lowerCamelCase : Any=30522 , _lowerCamelCase : Union[str, Any]=768 , _lowerCamelCase : Tuple=12 , _lowerCamelCase : Any=12 , _lowerCamelCase : str=3072 , _lowerCamelCase : str="gelu" , _lowerCamelCase : int=0.1 , _lowerCamelCase : Optional[int]=0.1 , _lowerCamelCase : Dict=512 , _lowerCamelCase : List[Any]=2 , _lowerCamelCase : int=0.0_2 , _lowerCamelCase : Union[str, Any]=1e-12 , _lowerCamelCase : Union[str, Any]=0 , _lowerCamelCase : List[str]="topK" , _lowerCamelCase : Optional[Any]="constant" , _lowerCamelCase : Optional[Any]=0.0 , **_lowerCamelCase : str , ): super().__init__(pad_token_id=_lowerCamelCase , **_lowerCamelCase ) _snake_case = vocab_size _snake_case = hidden_size _snake_case = num_hidden_layers _snake_case = num_attention_heads _snake_case = hidden_act _snake_case = intermediate_size _snake_case = hidden_dropout_prob _snake_case = attention_probs_dropout_prob _snake_case = max_position_embeddings _snake_case = type_vocab_size _snake_case = initializer_range _snake_case = layer_norm_eps _snake_case = pruning_method _snake_case = mask_init _snake_case = mask_scale
288
1
"""simple docstring""" import math from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils import SchedulerMixin, SchedulerOutput class lowerCAmelCase__ ( A_ , A_ ): __a = 1 @register_to_config def __init__( self : Optional[Any] , _lowerCamelCase : int = 1000 , _lowerCamelCase : Optional[Union[np.ndarray, List[float]]] = None ): # set `betas`, `alphas`, `timesteps` self.set_timesteps(_lowerCamelCase ) # standard deviation of the initial noise distribution _snake_case = 1.0 # For now we only support F-PNDM, i.e. the runge-kutta method # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf # mainly at formula (9), (12), (13) and the Algorithm 2. _snake_case = 4 # running values _snake_case = [] def lowercase ( self : Any , _lowerCamelCase : int , _lowerCamelCase : Union[str, torch.device] = None ): _snake_case = num_inference_steps _snake_case = torch.linspace(1 , 0 , num_inference_steps + 1 )[:-1] _snake_case = torch.cat([steps, torch.tensor([0.0] )] ) if self.config.trained_betas is not None: _snake_case = torch.tensor(self.config.trained_betas , dtype=torch.floataa ) else: _snake_case = torch.sin(steps * math.pi / 2 ) ** 2 _snake_case = (1.0 - self.betas**2) ** 0.5 _snake_case = (torch.atana(self.betas , self.alphas ) / math.pi * 2)[:-1] _snake_case = timesteps.to(_lowerCamelCase ) _snake_case = [] def lowercase ( self : Optional[Any] , _lowerCamelCase : torch.FloatTensor , _lowerCamelCase : int , _lowerCamelCase : torch.FloatTensor , _lowerCamelCase : bool = True , ): if self.num_inference_steps is None: raise ValueError( '''Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler''' ) _snake_case = (self.timesteps == timestep).nonzero().item() _snake_case = timestep_index + 1 _snake_case = sample * self.betas[timestep_index] + model_output * 
self.alphas[timestep_index] self.ets.append(_lowerCamelCase ) if len(self.ets ) == 1: _snake_case = self.ets[-1] elif len(self.ets ) == 2: _snake_case = (3 * self.ets[-1] - self.ets[-2]) / 2 elif len(self.ets ) == 3: _snake_case = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12 else: _snake_case = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4]) _snake_case = self._get_prev_sample(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=_lowerCamelCase ) def lowercase ( self : Dict , _lowerCamelCase : torch.FloatTensor , *_lowerCamelCase : List[Any] , **_lowerCamelCase : str ): return sample def lowercase ( self : Dict , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : int , _lowerCamelCase : Tuple ): _snake_case = self.alphas[timestep_index] _snake_case = self.betas[timestep_index] _snake_case = self.alphas[prev_timestep_index] _snake_case = self.betas[prev_timestep_index] _snake_case = (sample - sigma * ets) / max(_lowerCamelCase , 1e-8 ) _snake_case = next_alpha * pred + ets * next_sigma return prev_sample def __len__( self : Dict ): return self.config.num_train_timesteps
288
"""Spark-backed dataset builder.

Distributes Arrow/Parquet shard writing across the partitions of a
`pyspark.sql.DataFrame`.
"""
import os
import posixpath
import shutil  # FIX: used by _prepare_split_single but was never imported
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union

import numpy as np
import pyarrow as pa

import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
    is_remote_filesystem,
    rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int


logger = datasets.utils.logging.get_logger(__name__)

if TYPE_CHECKING:
    import pyspark


@dataclass
class SparkConfig(datasets.BuilderConfig):
    """BuilderConfig for the Spark builder."""

    # Optional explicit schema; inferred from the DataFrame when None.
    features: Optional[datasets.Features] = None


def _generate_iterable_examples(
    df: "pyspark.sql.DataFrame",
    partition_order: List[int],
):
    """Return a generator factory yielding ``(key, example)`` pairs.

    Partitions are visited in ``partition_order``; the key is
    ``"{partition_id}_{row_id}"``.
    """
    import pyspark

    def generate_fn():
        df_with_partition_id = df.select("*", pyspark.sql.functions.spark_partition_id().alias("part_id"))
        for partition_id in partition_order:
            # Filter the rows belonging to one physical partition, then drop the helper column.
            partition_df = df_with_partition_id.select("*").where(f"part_id = {partition_id}").drop("part_id")
            rows = partition_df.collect()
            row_id = 0
            for row in rows:
                yield f"{partition_id}_{row_id}", row.asDict()
                row_id += 1

    return generate_fn


class SparkExamplesIterable(_BaseExamplesIterable):
    """Examples iterable backed by a Spark DataFrame, one shard per partition."""

    def __init__(
        self,
        df: "pyspark.sql.DataFrame",
        partition_order=None,
    ):
        self.df = df
        self.partition_order = partition_order or range(self.df.rdd.getNumPartitions())
        self.generate_examples_fn = _generate_iterable_examples(self.df, self.partition_order)

    def __iter__(self):
        yield from self.generate_examples_fn()

    def shuffle_data_sources(self, generator: np.random.Generator) -> "SparkExamplesIterable":
        """Return a copy with the partition order shuffled by `generator`."""
        partition_order = list(range(self.df.rdd.getNumPartitions()))
        generator.shuffle(partition_order)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    def shard_data_sources(self, worker_id: int, num_workers: int) -> "SparkExamplesIterable":
        """Return the subset of partitions assigned to `worker_id`."""
        partition_order = self.split_shard_indices_by_worker(worker_id, num_workers)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    @property
    def n_shards(self) -> int:
        return len(self.partition_order)


class Spark(datasets.DatasetBuilder):
    """DatasetBuilder that writes shards in parallel on Spark executors."""

    BUILDER_CONFIG_CLASS = SparkConfig

    def __init__(
        self,
        df: "pyspark.sql.DataFrame",
        cache_dir: str = None,
        working_dir: str = None,
        **config_kwargs,
    ):
        import pyspark

        self._spark = pyspark.sql.SparkSession.builder.getOrCreate()
        self.df = df
        self._working_dir = working_dir
        super().__init__(
            cache_dir=cache_dir,
            config_name=str(self.df.semanticHash()),
            **config_kwargs,
        )

    def _validate_cache_dir(self):
        """On multi-node clusters, verify the cache_dir is on a shared (NFS) filesystem."""

        # Returns the path of the created file.
        def create_cache_and_write_probe(context):
            # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
            # already exist.
            os.makedirs(self._cache_dir, exist_ok=True)
            # FIX: `uuid.uuida` does not exist -> `uuid.uuid4`
            probe_file = os.path.join(self._cache_dir, "fs_test" + uuid.uuid4().hex)
            # Opening the file in append mode will create a new file unless it already exists, in which case it will not
            # change the file contents.
            open(probe_file, "a")
            return [probe_file]

        if self._spark.conf.get("spark.master", "").startswith("local"):
            return

        # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
        # accessible to the driver.
        # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
        if self._cache_dir:
            probe = (
                self._spark.sparkContext.parallelize(range(1), 1).mapPartitions(create_cache_and_write_probe).collect()
            )
            if os.path.isfile(probe[0]):
                return

        raise ValueError(
            "When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir"
        )

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager: datasets.download.download_manager.DownloadManager):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN)]

    def _repartition_df_if_needed(self, max_shard_size):
        """Repartition self.df so the approximate size of one partition is <= max_shard_size."""
        import pyspark

        def get_arrow_batch_size(it):
            for batch in it:
                yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]})

        df_num_rows = self.df.count()
        sample_num_rows = df_num_rows if df_num_rows <= 100 else 100
        # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
        approx_bytes_per_row = (
            self.df.limit(sample_num_rows)
            .repartition(1)
            .mapInArrow(get_arrow_batch_size, "batch_bytes: long")
            .agg(pyspark.sql.functions.sum("batch_bytes").alias("sample_bytes"))
            .collect()[0]
            .sample_bytes
            / sample_num_rows
        )
        approx_total_size = approx_bytes_per_row * df_num_rows
        if approx_total_size > max_shard_size:
            # Make sure there is at least one row per partition.
            new_num_partitions = min(df_num_rows, int(approx_total_size / max_shard_size))
            self.df = self.df.repartition(new_num_partitions)

    def _prepare_split_single(
        self,
        fpath: str,
        file_format: str,
        max_shard_size: int,
    ):
        """Run shard writing on the executors; yields (task_id, per-task stats)."""
        import pyspark

        writer_class = ParquetWriter if file_format == "parquet" else ArrowWriter
        working_fpath = os.path.join(self._working_dir, os.path.basename(fpath)) if self._working_dir else fpath
        embed_local_files = file_format == "parquet"

        # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
        # pickling the SparkContext.
        features = self.config.features
        writer_batch_size = self._writer_batch_size
        storage_options = self._fs.storage_options

        def write_arrow(it):
            # Within the same SparkContext, no two task attempts will share the same attempt ID.
            task_id = pyspark.TaskContext().taskAttemptId()
            first_batch = next(it, None)
            if first_batch is None:
                # Some partitions might not receive any data.
                return pa.RecordBatch.from_arrays(
                    [[task_id], [0], [0]],
                    names=["task_id", "num_examples", "num_bytes"],
                )
            shard_id = 0
            writer = writer_class(
                features=features,
                path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                writer_batch_size=writer_batch_size,
                storage_options=storage_options,
                embed_local_files=embed_local_files,
            )
            table = pa.Table.from_batches([first_batch])
            writer.write_table(table)
            for batch in it:
                if max_shard_size is not None and writer._num_bytes >= max_shard_size:
                    # Finish the current shard and start a new one.
                    num_examples, num_bytes = writer.finalize()
                    writer.close()
                    yield pa.RecordBatch.from_arrays(
                        [[task_id], [num_examples], [num_bytes]],
                        names=["task_id", "num_examples", "num_bytes"],
                    )
                    shard_id += 1
                    writer = writer_class(
                        features=writer._features,
                        path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                        writer_batch_size=writer_batch_size,
                        storage_options=storage_options,
                        embed_local_files=embed_local_files,
                    )
                table = pa.Table.from_batches([batch])
                writer.write_table(table)
            if writer._num_bytes > 0:
                num_examples, num_bytes = writer.finalize()
                writer.close()
                yield pa.RecordBatch.from_arrays(
                    [[task_id], [num_examples], [num_bytes]],
                    names=["task_id", "num_examples", "num_bytes"],
                )
            if working_fpath != fpath:
                # Move the finished shards from the local working dir to the final output dir.
                for file in os.listdir(os.path.dirname(working_fpath)):
                    dest = os.path.join(os.path.dirname(fpath), os.path.basename(file))
                    shutil.move(file, dest)

        stats = (
            self.df.mapInArrow(write_arrow, "task_id: long, num_examples: long, num_bytes: long")
            .groupBy("task_id")
            .agg(
                pyspark.sql.functions.sum("num_examples").alias("total_num_examples"),
                pyspark.sql.functions.sum("num_bytes").alias("total_num_bytes"),
                pyspark.sql.functions.count("num_bytes").alias("num_shards"),
                pyspark.sql.functions.collect_list("num_examples").alias("shard_lengths"),
            )
            .collect()
        )
        for row in stats:
            yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)

    def _prepare_split(
        self,
        split_generator: "datasets.SplitGenerator",
        file_format: str = "arrow",
        max_shard_size: Optional[Union[str, int]] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self._validate_cache_dir()

        max_shard_size = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE)
        self._repartition_df_if_needed(max_shard_size)

        is_local = not is_remote_filesystem(self._fs)
        path_join = os.path.join if is_local else posixpath.join

        # FIX: SUFFIX was referenced in the f-string below but never bound.
        SUFFIX = "-TTTTT-SSSSS-of-NNNNN"
        fname = f"{self.name}-{split_generator.name}{SUFFIX}.{file_format}"
        fpath = path_join(self._output_dir, fname)

        total_num_examples = 0
        total_num_bytes = 0
        total_shards = 0
        task_id_and_num_shards = []
        all_shard_lengths = []

        for task_id, content in self._prepare_split_single(fpath, file_format, max_shard_size):
            (
                num_examples,
                num_bytes,
                num_shards,
                shard_lengths,
            ) = content
            if num_bytes > 0:
                total_num_examples += num_examples
                total_num_bytes += num_bytes
                total_shards += num_shards
                task_id_and_num_shards.append((task_id, num_shards))
                all_shard_lengths.extend(shard_lengths)

        split_generator.split_info.num_examples = total_num_examples
        split_generator.split_info.num_bytes = total_num_bytes

        # should rename everything at the end
        logger.debug(f"Renaming {total_shards} shards.")
        if total_shards > 1:
            split_generator.split_info.shard_lengths = all_shard_lengths

            # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
            # pickling error due to pickling the SparkContext.
            fs = self._fs

            # use the -SSSSS-of-NNNNN pattern
            def _rename_shard(
                task_id: int,
                shard_id: int,
                global_shard_id: int,
            ):
                rename(
                    fs,
                    fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                    fpath.replace("TTTTT-SSSSS", f"{global_shard_id:05d}").replace("NNNNN", f"{total_shards:05d}"),
                )

            args = []
            global_shard_id = 0
            for i in range(len(task_id_and_num_shards)):
                task_id, num_shards = task_id_and_num_shards[i]
                for shard_id in range(num_shards):
                    args.append([task_id, shard_id, global_shard_id])
                    global_shard_id += 1
            self._spark.sparkContext.parallelize(args, len(args)).map(lambda args: _rename_shard(*args)).collect()
        else:
            # don't use any pattern
            shard_id = 0
            task_id = task_id_and_num_shards[0][0]
            self._rename(
                fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                fpath.replace(SUFFIX, ""),
            )

    def _get_examples_iterable_for_split(
        self,
        split_generator: "datasets.SplitGenerator",
    ) -> SparkExamplesIterable:
        return SparkExamplesIterable(self.df)
288
1
"""Benchmark helpers: generate dummy examples and time ArrowWriter writes."""
import timeit

import numpy as np

import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD


def get_duration(func):
    """Decorator: run `func` and return its wall-clock duration (seconds) instead of its result."""

    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        func(*args, **kwargs)
        delta = timeit.default_timer() - starttime
        return delta

    # keep the benchmarked function's name for reporting
    wrapper.__name__ = func.__name__

    return wrapper


def generate_examples(features: dict, num_examples=100, seq_shapes=None):
    """Build `num_examples` random examples matching `features`.

    Args:
        features: mapping of column name -> datasets feature type.
        num_examples: number of examples to generate.
        seq_shapes: mapping of column name -> shape, required for Sequence features.

    Returns:
        List of `(index, example_dict)` tuples.
    """
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for col_id, (k, v) in enumerate(features.items()):
            if isinstance(v, _ArrayXD):
                example[k] = np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v, datasets.Value):
                if v.dtype == "string":
                    example[k] = "The small grey turtle was surprisingly fast when challenged."
                else:
                    example[k] = np.random.randint(10, size=1).astype(v.dtype).item()
            elif isinstance(v, datasets.Sequence):
                # unwrap nested sequences down to the inner feature
                while isinstance(v, datasets.Sequence):
                    v = v.feature
                shape = seq_shapes[k]
                example[k] = np.random.rand(*shape).astype(v.dtype)
        data = example

        dummy_data.append((i, data))

    return dummy_data


def write(my_features, fpath, num_examples=100, seq_shapes=None):
    """Write `num_examples` random examples to `fpath` and return the resulting Dataset.

    Raises:
        ValueError: if the writer reports a different number of examples than requested.
    """
    dummy_data = generate_examples(my_features, num_examples=num_examples, seq_shapes=seq_shapes)

    with ArrowWriter(features=my_features, path=fpath) as writer:
        for key, record in dummy_data:
            example = my_features.encode_example(record)
            writer.write(example)

        num_final_examples, num_bytes = writer.finalize()

    if not num_final_examples == num_examples:
        raise ValueError(
            f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}."
        )

    dataset = datasets.Dataset.from_file(filename=fpath, info=datasets.DatasetInfo(features=my_features))

    return dataset
288
"""Project Euler problem 86: cuboids with an integer shortest surface path.

For a cuboid with sides a <= b <= c, the shortest path along the surfaces between
opposite corners has length sqrt((a + b)**2 + c**2). This counts cuboids (up to a
maximum side length M) whose shortest path is an integer, and finds the least M
for which that count first exceeds `limit`.
"""
from math import sqrt


def solution(limit: int = 1_000_000) -> int:
    """Return the least max side length M such that the number of distinct cuboids
    with integer shortest path and longest side <= M exceeds `limit`.

    For each candidate longest side M we sweep the sum of the two shorter sides
    (2 .. 2*M); when sqrt(sum**2 + M**2) is an integer, the number of valid
    (a, b) splits of that sum with a <= b <= M is added to the running total.
    """
    num_cuboids = 0
    max_cuboid_size = 0
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
                # count splits (a, b) of sum_shortest_sides with 1 <= a <= b <= max_cuboid_size
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
                    - max(1, sum_shortest_sides - max_cuboid_size)
                    + 1
                )
    return max_cuboid_size


if __name__ == "__main__":
    print(F"{solution() = }")
288
1
"""BLEU metric for the `datasets` library, wrapping tensorflow/nmt's reference
implementation of corpus-level BLEU."""
import datasets

from .nmt_bleu import compute_bleu  # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py


_CITATION = '\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = "{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation",\n author = "Lin, Chin-Yew and\n Och, Franz Josef",\n booktitle = "{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics",\n month = "aug 23{--}aug 27",\n year = "2004",\n address = "Geneva, Switzerland",\n publisher = "COLING",\n url = "https://www.aclweb.org/anthology/C04-1072",\n pages = "501--507",\n}\n'

_DESCRIPTION = '\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine\'s output and that of a human: "the closer a machine translation is to a professional human translation,\nthe better it is" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation\'s overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU\'s output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n'

_KWARGS_DESCRIPTION = '\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n \'bleu\': bleu score,\n \'precisions\': geometric mean of n-gram precisions,\n \'brevity_penalty\': brevity penalty,\n \'length_ratio\': ratio of lengths,\n \'translation_length\': translation_length,\n \'reference_length\': reference_length\nExamples:\n\n >>> predictions = [\n ... ["hello", "there", "general", "kenobi"], # tokenized prediction of the first sample\n ... ["foo", "bar", "foobar"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]], # tokenized references for the first sample (2 references)\n ... [["foo", "bar", "foobar"]] # tokenized references for the second sample (1 reference)\n ... ]\n >>> bleu = datasets.load_metric("bleu")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results["bleu"])\n 1.0\n'


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Bleu(datasets.Metric):
    """Corpus-level BLEU metric (tokenized predictions/references)."""

    def _info(self):
        # Declares the feature schema and reference material for this metric.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
            codebase_urls=["https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/BLEU",
                "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
            ],
        )

    def _compute(self, predictions, references, max_order=4, smooth=False):
        """Delegate to tensorflow/nmt's compute_bleu and unpack its result tuple."""
        score = compute_bleu(
            reference_corpus=references, translation_corpus=predictions, max_order=max_order, smooth=smooth
        )
        ((bleu), (precisions), (bp), (ratio), (translation_length), (reference_length)) = score
        return {
            "bleu": bleu,
            "precisions": precisions,
            "brevity_penalty": bp,
            "length_ratio": ratio,
            "translation_length": translation_length,
            "reference_length": reference_length,
        }
288
"""Convert DeiT checkpoints from the timm library to Hugging Face Transformers format."""
import argparse
import json
from pathlib import Path

import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


# here we list all keys to be renamed (original name on the left, our name on the right)
def create_rename_keys(config, base_model=False):
    """Build the (timm key, HF key) rename table for a DeiT checkpoint."""
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"blocks.{i}.norm1.weight", f"deit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"blocks.{i}.norm1.bias", f"deit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append((f"blocks.{i}.attn.proj.weight", f"deit.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.attn.proj.bias", f"deit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"blocks.{i}.norm2.weight", f"deit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"blocks.{i}.norm2.bias", f"deit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"deit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"deit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"deit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"deit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("cls_token", "deit.embeddings.cls_token"),
            ("dist_token", "deit.embeddings.distillation_token"),
            ("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"),
            ("patch_embed.proj.bias", "deit.embeddings.patch_embeddings.projection.bias"),
            ("pos_embed", "deit.embeddings.position_embeddings"),
        ]
    )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
                ("pre_logits.fc.weight", "pooler.dense.weight"),
                ("pre_logits.fc.bias", "pooler.dense.bias"),
            ]
        )
        # if just the base model, we should remove "deit" from all keys that start with "deit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("deit") else pair for pair in rename_keys]
    else:
        # layernorm + classification heads
        rename_keys.extend(
            [
                ("norm.weight", "deit.layernorm.weight"),
                ("norm.bias", "deit.layernorm.bias"),
                ("head.weight", "cls_classifier.weight"),
                ("head.bias", "cls_classifier.bias"),
                ("head_dist.weight", "distillation_classifier.weight"),
                ("head_dist.bias", "distillation_classifier.bias"),
            ]
        )

    return rename_keys


# we split up the matrix of each encoder layer into queries, keys and values
def read_in_q_k_v(state_dict, config, base_model=False):
    """Split timm's fused qkv projection into separate query/key/value tensors in-place."""
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "deit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def rename_key(dct, old, new):
    """Move entry `old` of `dct` to key `new`."""
    val = dct.pop(old)
    dct[new] = val


# We will verify our results on an image of cute cats
def prepare_img():
    """Download the standard COCO cats test image used for verification."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_deit_checkpoint(deit_name, pytorch_dump_folder_path):
    """
    Copy/paste/tweak model's weights to our DeiT structure, verify against timm,
    and save model + image processor to `pytorch_dump_folder_path`.
    """
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    config.patch_size = int(deit_name[-6:-4])
    config.image_size = int(deit_name[-3:])
    # size of the architecture
    if deit_name[9:].startswith("tiny"):
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith("small"):
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith("base"):
        pass
    elif deit_name[4:].startswith("large"):
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # load original model from timm
    timm_model = timm.create_model(deit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by DeiTImageProcessor
    size = int(
        (256 / 224) * config.image_size
    )  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size, crop_size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {deit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--deit_name",
        default="vit_deit_base_distilled_patch16_224",
        type=str,
        help="Name of the DeiT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
288
1
"""Recursive insertion sort: sorts a list in place in ascending order."""
from __future__ import annotations


def rec_insertion_sort(collection: list, n: int):
    """Sort the first `n` elements of `collection` in place, recursively.

    Args:
        collection: mutable list of comparable items.
        n: number of leading elements to sort (call with len(collection)).
    """
    # Checks if the entire collection has been sorted
    if len(collection) <= 1 or n <= 1:
        return

    insert_next(collection, n - 1)
    rec_insertion_sort(collection, n - 1)


def insert_next(collection: list, index: int):
    """Bubble collection[index - 1] rightwards until its neighbors are ordered."""
    # Checks order between adjacent elements
    if index >= len(collection) or collection[index - 1] <= collection[index]:
        return

    # Swaps adjacent elements since they are not in ascending order
    collection[index - 1], collection[index] = (
        collection[index],
        collection[index - 1],
    )
    insert_next(collection, index + 1)


if __name__ == "__main__":
    numbers = input("Enter integers separated by spaces: ")
    number_list: list[int] = [int(num) for num in numbers.split()]
    rec_insertion_sort(number_list, len(number_list))
    print(number_list)
288
"""Tests for datasets' DownloadManager: download, extract, and archive/file iteration."""
import json
import os
from pathlib import Path

import pytest

from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename


URL = "http://www.mocksite.com/file1.txt"
CONTENT = '"text": ["foo", "foo"]'
HASH = "6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"


class MockResponse:
    """Minimal stand-in for a requests.Response object."""

    status_code = 200
    headers = {"Content-Length": "100"}
    cookies = {}

    def iter_content(self, **kwargs):
        return [bytes(CONTENT, "utf-8")]


def mock_request(*args, **kwargs):
    """Replacement for requests.request that always returns the mock response."""
    return MockResponse()


@pytest.mark.parametrize("urls_type", [str, list, dict])
def test_download_manager_download(urls_type, tmp_path, monkeypatch):
    """Download single/list/dict url inputs and verify cached paths, content and metadata."""
    import requests

    monkeypatch.setattr(requests, "request", mock_request)

    url = URL
    if issubclass(urls_type, str):
        urls = url
    elif issubclass(urls_type, list):
        urls = [url]
    elif issubclass(urls_type, dict):
        urls = {"train": url}
    dataset_name = "dummy"
    cache_subdir = "downloads"
    cache_dir_root = tmp_path
    download_config = DownloadConfig(
        cache_dir=os.path.join(cache_dir_root, cache_subdir),
        use_etag=False,
    )
    dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
    downloaded_paths = dl_manager.download(urls)
    input_urls = urls
    for downloaded_paths in [downloaded_paths]:
        if isinstance(urls, str):
            downloaded_paths = [downloaded_paths]
            input_urls = [urls]
        elif isinstance(urls, dict):
            assert "train" in downloaded_paths.keys()
            downloaded_paths = downloaded_paths.values()
            input_urls = urls.values()
        assert downloaded_paths
        for downloaded_path, input_url in zip(downloaded_paths, input_urls):
            assert downloaded_path == dl_manager.downloaded_paths[input_url]
            downloaded_path = Path(downloaded_path)
            parts = downloaded_path.parts
            assert parts[-1] == HASH
            assert parts[-2] == cache_subdir
            assert downloaded_path.exists()
            content = downloaded_path.read_text()
            assert content == CONTENT
            metadata_downloaded_path = downloaded_path.with_suffix(".json")
            assert metadata_downloaded_path.exists()
            metadata_content = json.loads(metadata_downloaded_path.read_text())
            assert metadata_content == {"url": URL, "etag": None}


@pytest.mark.parametrize("paths_type", [str, list, dict])
def test_download_manager_extract(paths_type, xz_file, text_file):
    """Extract single/list/dict path inputs and verify extracted paths and contents."""
    filename = str(xz_file)
    if issubclass(paths_type, str):
        paths = filename
    elif issubclass(paths_type, list):
        paths = [filename]
    elif issubclass(paths_type, dict):
        paths = {"train": filename}
    dataset_name = "dummy"
    cache_dir = xz_file.parent
    extracted_subdir = "extracted"
    download_config = DownloadConfig(
        cache_dir=cache_dir,
        use_etag=False,
    )
    dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
    extracted_paths = dl_manager.extract(paths)
    input_paths = paths
    for extracted_paths in [extracted_paths]:
        if isinstance(paths, str):
            extracted_paths = [extracted_paths]
            input_paths = [paths]
        elif isinstance(paths, dict):
            assert "train" in extracted_paths.keys()
            extracted_paths = extracted_paths.values()
            input_paths = paths.values()
        assert extracted_paths
        for extracted_path, input_path in zip(extracted_paths, input_paths):
            assert extracted_path == dl_manager.extracted_paths[input_path]
            extracted_path = Path(extracted_path)
            parts = extracted_path.parts
            assert parts[-1] == hash_url_to_filename(input_path, etag=None)
            assert parts[-2] == extracted_subdir
            assert extracted_path.exists()
            extracted_file_content = extracted_path.read_text()
            expected_file_content = text_file.read_text()
            assert extracted_file_content == expected_file_content


def _test_jsonl(path, file):
    """Check that `file` is a 4-item jsonl stream with the expected columns."""
    assert path.endswith(".jsonl")
    for num_items, line in enumerate(file, start=1):
        item = json.loads(line.decode("utf-8"))
        assert item.keys() == {"col_1", "col_2", "col_3"}
    assert num_items == 4


@pytest.mark.parametrize("archive_jsonl", ["tar_jsonl_path", "zip_jsonl_path"])
def test_iter_archive_path(archive_jsonl, request):
    archive_jsonl_path = request.getfixturevalue(archive_jsonl)
    dl_manager = DownloadManager()
    for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(archive_jsonl_path), start=1):
        _test_jsonl(path, file)
    assert num_jsonl == 2


@pytest.mark.parametrize("archive_nested_jsonl", ["tar_nested_jsonl_path", "zip_nested_jsonl_path"])
def test_iter_archive_file(archive_nested_jsonl, request):
    archive_nested_jsonl_path = request.getfixturevalue(archive_nested_jsonl)
    dl_manager = DownloadManager()
    for num_tar, (path, file) in enumerate(dl_manager.iter_archive(archive_nested_jsonl_path), start=1):
        for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(file), start=1):
            _test_jsonl(subpath, subfile)
    assert num_tar == 1
    assert num_jsonl == 2


def test_iter_files(data_dir_with_hidden_files):
    dl_manager = DownloadManager()
    for num_file, file in enumerate(dl_manager.iter_files(data_dir_with_hidden_files), start=1):
        assert os.path.basename(file) == ("test.txt" if num_file == 1 else "train.txt")
    assert num_file == 2
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_speech_available, is_torch_available, ) UpperCAmelCase__ = { 'configuration_trocr': ['TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TrOCRConfig'], 'processing_trocr': ['TrOCRProcessor'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ = [ 'TROCR_PRETRAINED_MODEL_ARCHIVE_LIST', 'TrOCRForCausalLM', 'TrOCRPreTrainedModel', ] if TYPE_CHECKING: from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig from .processing_trocr import TrOCRProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel else: import sys UpperCAmelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
288
"""simple docstring""" import argparse import torch from transformers import BertForMaskedLM if __name__ == "__main__": UpperCAmelCase__ = argparse.ArgumentParser( description=( 'Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned' ' Distillation' ) ) parser.add_argument('--model_type', default='bert', choices=['bert']) parser.add_argument('--model_name', default='bert-base-uncased', type=str) parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_bert-base-uncased_0247911.pth', type=str) parser.add_argument('--vocab_transform', action='store_true') UpperCAmelCase__ = parser.parse_args() if args.model_type == "bert": UpperCAmelCase__ = BertForMaskedLM.from_pretrained(args.model_name) UpperCAmelCase__ = 'bert' else: raise ValueError('args.model_type should be "bert".') UpperCAmelCase__ = model.state_dict() UpperCAmelCase__ = {} for w in ["word_embeddings", "position_embeddings"]: UpperCAmelCase__ = state_dict[F"{prefix}.embeddings.{w}.weight"] for w in ["weight", "bias"]: UpperCAmelCase__ = state_dict[F"{prefix}.embeddings.LayerNorm.{w}"] UpperCAmelCase__ = 0 for teacher_idx in [0, 2, 4, 7, 9, 11]: for w in ["weight", "bias"]: UpperCAmelCase__ = state_dict[ F"{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}" ] UpperCAmelCase__ = state_dict[ F"{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}" ] UpperCAmelCase__ = state_dict[ F"{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}" ] UpperCAmelCase__ = state_dict[ F"{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}" ] UpperCAmelCase__ = state_dict[ F"{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}" ] UpperCAmelCase__ = state_dict[ F"{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}" ] UpperCAmelCase__ = state_dict[ F"{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}" ] UpperCAmelCase__ = state_dict[ F"{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}" ] std_idx += 1 
UpperCAmelCase__ = state_dict['cls.predictions.decoder.weight'] UpperCAmelCase__ = state_dict['cls.predictions.bias'] if args.vocab_transform: for w in ["weight", "bias"]: UpperCAmelCase__ = state_dict[F"cls.predictions.transform.dense.{w}"] UpperCAmelCase__ = state_dict[F"cls.predictions.transform.LayerNorm.{w}"] print(F"N layers selected for distillation: {std_idx}") print(F"Number of params transferred for distillation: {len(compressed_sd.keys())}") print(F"Save transferred checkpoint to {args.dump_checkpoint}.") torch.save(compressed_sd, args.dump_checkpoint)
288
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) UpperCAmelCase__ = {'configuration_plbart': ['PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PLBartConfig']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ = ['PLBartTokenizer'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ = [ 'PLBART_PRETRAINED_MODEL_ARCHIVE_LIST', 'PLBartForCausalLM', 'PLBartForConditionalGeneration', 'PLBartForSequenceClassification', 'PLBartModel', 'PLBartPreTrainedModel', ] if TYPE_CHECKING: from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_plbart import PLBartTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_plbart import ( PLBART_PRETRAINED_MODEL_ARCHIVE_LIST, PLBartForCausalLM, PLBartForConditionalGeneration, PLBartForSequenceClassification, PLBartModel, PLBartPreTrainedModel, ) else: import sys UpperCAmelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure)
288
"""simple docstring""" def _UpperCAmelCase ( __lowerCamelCase : list , __lowerCamelCase : int = 0 ) -> list: _snake_case = length or len(__lowerCamelCase ) _snake_case = False for i in range(length - 1 ): if list_data[i] > list_data[i + 1]: _snake_case , _snake_case = list_data[i + 1], list_data[i] _snake_case = True return list_data if not swapped else bubble_sort(__lowerCamelCase , length - 1 ) if __name__ == "__main__": import doctest doctest.testmod()
288
1
"""simple docstring""" def _UpperCAmelCase ( __lowerCamelCase : List[Any] ) -> Union[str, Any]: _snake_case = [] _snake_case = [] _snake_case = { '''^''': 3, '''*''': 2, '''/''': 2, '''%''': 2, '''+''': 1, '''-''': 1, } # Priority of each operator _snake_case = len(__lowerCamelCase ) if (len(__lowerCamelCase ) > 7) else 7 # Print table header for output print( '''Symbol'''.center(8 ) , '''Stack'''.center(__lowerCamelCase ) , '''Postfix'''.center(__lowerCamelCase ) , sep=''' | ''' , ) print('''-''' * (print_width * 3 + 7) ) for x in infix: if x.isalpha() or x.isdigit(): post_fix.append(__lowerCamelCase ) # if x is Alphabet / Digit, add it to Postfix elif x == "(": stack.append(__lowerCamelCase ) # if x is "(" push to Stack elif x == ")": # if x is ")" pop stack until "(" is encountered while stack[-1] != "(": post_fix.append(stack.pop() ) # Pop stack & add the content to Postfix stack.pop() else: if len(__lowerCamelCase ) == 0: stack.append(__lowerCamelCase ) # If stack is empty, push x to stack else: # while priority of x is not > priority of element in the stack while len(__lowerCamelCase ) > 0 and priority[x] <= priority[stack[-1]]: post_fix.append(stack.pop() ) # pop stack & add to Postfix stack.append(__lowerCamelCase ) # push x to stack print( x.center(8 ) , (''''''.join(__lowerCamelCase )).ljust(__lowerCamelCase ) , (''''''.join(__lowerCamelCase )).ljust(__lowerCamelCase ) , sep=''' | ''' , ) # Output in tabular format while len(__lowerCamelCase ) > 0: # while stack is not empty post_fix.append(stack.pop() ) # pop stack & add to Postfix print( ''' '''.center(8 ) , (''''''.join(__lowerCamelCase )).ljust(__lowerCamelCase ) , (''''''.join(__lowerCamelCase )).ljust(__lowerCamelCase ) , sep=''' | ''' , ) # Output in tabular format return "".join(__lowerCamelCase ) # return Postfix as str def _UpperCAmelCase ( __lowerCamelCase : Dict ) -> Optional[Any]: _snake_case = list(infix[::-1] ) # reverse the infix equation for i in range(len(__lowerCamelCase ) ): if 
infix[i] == "(": _snake_case = ''')''' # change "(" to ")" elif infix[i] == ")": _snake_case = '''(''' # change ")" to "(" return (infix_2_postfix(''''''.join(__lowerCamelCase ) ))[ ::-1 ] # call infix_2_postfix on Infix, return reverse of Postfix if __name__ == "__main__": UpperCAmelCase__ = input('\nEnter an Infix Equation = ') # Input an Infix equation UpperCAmelCase__ = ''.join(Infix.split()) # Remove spaces from the input print('\n\t', Infix, '(Infix) -> ', infix_2_prefix(Infix), '(Prefix)')
288
"""simple docstring""" import argparse import torch from transformers import ( SpeechTaConfig, SpeechTaFeatureExtractor, SpeechTaForSpeechToSpeech, SpeechTaForSpeechToText, SpeechTaForTextToSpeech, SpeechTaProcessor, SpeechTaTokenizer, logging, ) from transformers.tokenization_utils import AddedToken logging.set_verbosity_info() UpperCAmelCase__ = logging.get_logger('transformers.models.speecht5') UpperCAmelCase__ = { 'speech_encoder_prenet.layer_norm': 'speecht5.encoder.prenet.feature_projection.layer_norm', 'speech_encoder_prenet.post_extract_proj': 'speecht5.encoder.prenet.feature_projection.projection', 'speech_encoder_prenet.pos_conv.0': 'speecht5.encoder.prenet.pos_conv_embed.conv', 'speech_encoder_prenet.mask_emb': 'speecht5.encoder.prenet.masked_spec_embed', } UpperCAmelCase__ = { 'text_encoder_prenet.encoder_prenet.0': 'speecht5.encoder.prenet.embed_tokens', 'text_encoder_prenet.encoder_prenet.1.alpha': 'speecht5.encoder.prenet.encode_positions.alpha', } UpperCAmelCase__ = { 'speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0': 'speecht5.decoder.prenet.layers.0', 'speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0': 'speecht5.decoder.prenet.layers.1', 'speech_decoder_prenet.decoder_prenet.0.1': 'speecht5.decoder.prenet.final_layer', 'speech_decoder_prenet.decoder_prenet.1.alpha': 'speecht5.decoder.prenet.encode_positions.alpha', 'speech_decoder_prenet.spkembs_layer.0': 'speecht5.decoder.prenet.speaker_embeds_layer', } UpperCAmelCase__ = { 'speech_decoder_postnet.feat_out': 'speech_decoder_postnet.feat_out', 'speech_decoder_postnet.prob_out': 'speech_decoder_postnet.prob_out', 'speech_decoder_postnet.postnet.postnet.0.0': 'speech_decoder_postnet.layers.0.conv', 'speech_decoder_postnet.postnet.postnet.0.1': 'speech_decoder_postnet.layers.0.batch_norm', 'speech_decoder_postnet.postnet.postnet.1.0': 'speech_decoder_postnet.layers.1.conv', 'speech_decoder_postnet.postnet.postnet.1.1': 'speech_decoder_postnet.layers.1.batch_norm', 
'speech_decoder_postnet.postnet.postnet.2.0': 'speech_decoder_postnet.layers.2.conv', 'speech_decoder_postnet.postnet.postnet.2.1': 'speech_decoder_postnet.layers.2.batch_norm', 'speech_decoder_postnet.postnet.postnet.3.0': 'speech_decoder_postnet.layers.3.conv', 'speech_decoder_postnet.postnet.postnet.3.1': 'speech_decoder_postnet.layers.3.batch_norm', 'speech_decoder_postnet.postnet.postnet.4.0': 'speech_decoder_postnet.layers.4.conv', 'speech_decoder_postnet.postnet.postnet.4.1': 'speech_decoder_postnet.layers.4.batch_norm', } UpperCAmelCase__ = { 'text_decoder_prenet.embed_tokens': 'speecht5.decoder.prenet.embed_tokens', } UpperCAmelCase__ = { 'text_decoder_postnet.output_projection': 'text_decoder_postnet.lm_head', } UpperCAmelCase__ = { 'encoder.layers.*.self_attn.k_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj', 'encoder.layers.*.self_attn.v_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj', 'encoder.layers.*.self_attn.q_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj', 'encoder.layers.*.self_attn.out_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj', 'encoder.layers.*.self_attn_layer_norm': 'speecht5.encoder.wrapped_encoder.layers.*.layer_norm', 'encoder.layers.*.fc1': 'speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense', 'encoder.layers.*.fc2': 'speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense', 'encoder.layers.*.final_layer_norm': 'speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm', 'encoder.layer_norm': 'speecht5.encoder.wrapped_encoder.layer_norm', 'encoder.pos_emb.pe_k': 'speecht5.encoder.wrapped_encoder.embed_positions.pe_k', } UpperCAmelCase__ = { 'decoder.layers.*.self_attn.k_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj', 'decoder.layers.*.self_attn.v_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj', 'decoder.layers.*.self_attn.q_proj': 
'speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj', 'decoder.layers.*.self_attn.out_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj', 'decoder.layers.*.self_attn_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm', 'decoder.layers.*.encoder_attn.k_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj', 'decoder.layers.*.encoder_attn.v_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj', 'decoder.layers.*.encoder_attn.q_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj', 'decoder.layers.*.encoder_attn.out_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj', 'decoder.layers.*.encoder_attn_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm', 'decoder.layers.*.fc1': 'speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense', 'decoder.layers.*.fc2': 'speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense', 'decoder.layers.*.final_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm', } UpperCAmelCase__ = { **MAPPING_SPEECH_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_TEXT_DECODER_PRENET, **MAPPING_TEXT_DECODER_POSTNET, } UpperCAmelCase__ = { **MAPPING_TEXT_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_SPEECH_DECODER_PRENET, **MAPPING_SPEECH_DECODER_POSTNET, } UpperCAmelCase__ = { **MAPPING_SPEECH_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_SPEECH_DECODER_PRENET, **MAPPING_SPEECH_DECODER_POSTNET, } UpperCAmelCase__ = [] UpperCAmelCase__ = [ 'encoder.version', 'encoder.layers.*.norm_k.weight', 'encoder.layers.*.norm_k.bias', 'decoder.version', 'decoder.layers.*.norm_k.weight', 'decoder.layers.*.norm_k.bias', 'decoder.pos_emb.pe_k', 'speech_encoder_prenet.embed_positions._float_tensor', 'text_decoder_prenet.embed_positions._float_tensor', ] UpperCAmelCase__ = IGNORE_KEYS + [ 'encoder.proj', 
'text_encoder_prenet.*', 'speech_decoder_prenet.*', 'speech_decoder_postnet.*', ] UpperCAmelCase__ = IGNORE_KEYS + [ 'encoder.proj', 'speech_encoder_prenet.*', 'text_decoder_prenet.*', 'text_decoder_postnet.*', ] UpperCAmelCase__ = IGNORE_KEYS + [ 'encoder.proj', 'text_encoder_prenet.*', 'text_decoder_prenet.*', 'text_decoder_postnet.*', ] def _UpperCAmelCase ( __lowerCamelCase : Dict , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : int , __lowerCamelCase : Dict ) -> List[Any]: for attribute in key.split('''.''' ): _snake_case = getattr(__lowerCamelCase , __lowerCamelCase ) if weight_type is not None: _snake_case = getattr(__lowerCamelCase , __lowerCamelCase ).shape else: _snake_case = hf_pointer.shape if hf_shape != value.shape: raise ValueError( f'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be''' f''' {value.shape} for {full_name}''' ) if weight_type == "weight": _snake_case = value elif weight_type == "weight_g": _snake_case = value elif weight_type == "weight_v": _snake_case = value elif weight_type == "bias": _snake_case = value elif weight_type == "running_mean": _snake_case = value elif weight_type == "running_var": _snake_case = value elif weight_type == "num_batches_tracked": _snake_case = value else: _snake_case = value logger.info(f'''{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.''' ) def _UpperCAmelCase ( __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[int] ) -> List[str]: for key in ignore_keys: if key.endswith('''.*''' ): if name.startswith(key[:-1] ): return True elif ".*." 
in key: _snake_case , _snake_case = key.split('''.*.''' ) if prefix in name and suffix in name: return True elif key in name: return True return False def _UpperCAmelCase ( __lowerCamelCase : Dict , __lowerCamelCase : List[Any] , __lowerCamelCase : Tuple ) -> Optional[Any]: _snake_case = [] if task == "s2t": _snake_case = hf_model.speechta.encoder.prenet.feature_encoder _snake_case = MAPPING_S2T _snake_case = IGNORE_KEYS_S2T elif task == "t2s": _snake_case = None _snake_case = MAPPING_T2S _snake_case = IGNORE_KEYS_T2S elif task == "s2s": _snake_case = hf_model.speechta.encoder.prenet.feature_encoder _snake_case = MAPPING_S2S _snake_case = IGNORE_KEYS_S2S else: raise ValueError(f'''Unsupported task: {task}''' ) for name, value in fairseq_dict.items(): if should_ignore(__lowerCamelCase , __lowerCamelCase ): logger.info(f'''{name} was ignored''' ) continue _snake_case = False if "conv_layers" in name: load_conv_layer( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , hf_model.config.feat_extract_norm == '''group''' , ) _snake_case = True else: for key, mapped_key in MAPPING.items(): # mapped_key = "speecht5." 
+ mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if "*" in key: _snake_case , _snake_case = key.split('''.*.''' ) if prefix in name and suffix in name: _snake_case = suffix # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]: if key in name: _snake_case = True if "*" in mapped_key: _snake_case = name.split(__lowerCamelCase )[0].split('''.''' )[-2] _snake_case = mapped_key.replace('''*''' , __lowerCamelCase ) if "weight_g" in name: _snake_case = '''weight_g''' elif "weight_v" in name: _snake_case = '''weight_v''' elif "bias" in name: _snake_case = '''bias''' elif "weight" in name: _snake_case = '''weight''' elif "running_mean" in name: _snake_case = '''running_mean''' elif "running_var" in name: _snake_case = '''running_var''' elif "num_batches_tracked" in name: _snake_case = '''num_batches_tracked''' else: _snake_case = None set_recursively(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) continue if not is_used: unused_weights.append(__lowerCamelCase ) logger.warning(f'''Unused weights: {unused_weights}''' ) def _UpperCAmelCase ( __lowerCamelCase : Any , __lowerCamelCase : Optional[int] , __lowerCamelCase : Tuple , __lowerCamelCase : List[Any] , __lowerCamelCase : Tuple ) -> List[Any]: _snake_case = full_name.split('''conv_layers.''' )[-1] _snake_case = name.split('''.''' ) _snake_case = int(items[0] ) _snake_case = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) _snake_case = value logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' 
{feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) _snake_case = value logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' ) _snake_case = value logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' ) _snake_case = value logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(__lowerCamelCase ) @torch.no_grad() def _UpperCAmelCase ( __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Dict , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : int=None , __lowerCamelCase : Union[str, Any]=None , ) -> Dict: if config_path is not None: _snake_case = SpeechTaConfig.from_pretrained(__lowerCamelCase ) else: _snake_case = SpeechTaConfig() if task == "s2t": _snake_case = config.max_text_positions _snake_case = SpeechTaForSpeechToText(__lowerCamelCase ) elif task == "t2s": _snake_case = 18_76 _snake_case = 6_00 _snake_case = config.max_speech_positions _snake_case = SpeechTaForTextToSpeech(__lowerCamelCase ) elif task == "s2s": _snake_case = 18_76 _snake_case = config.max_speech_positions _snake_case = SpeechTaForSpeechToSpeech(__lowerCamelCase ) else: raise ValueError(f'''Unknown task name: {task}''' ) if vocab_path: 
_snake_case = SpeechTaTokenizer(__lowerCamelCase , model_max_length=config.max_text_positions ) # Mask token behaves like a normal word, i.e. include the space before it _snake_case = AddedToken('''<mask>''' , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) _snake_case = mask_token tokenizer.add_special_tokens({'''mask_token''': mask_token} ) tokenizer.add_tokens(['''<ctc_blank>'''] ) _snake_case = SpeechTaFeatureExtractor() _snake_case = SpeechTaProcessor(tokenizer=__lowerCamelCase , feature_extractor=__lowerCamelCase ) processor.save_pretrained(__lowerCamelCase ) _snake_case = torch.load(__lowerCamelCase ) recursively_load_weights(fairseq_checkpoint['''model'''] , __lowerCamelCase , __lowerCamelCase ) model.save_pretrained(__lowerCamelCase ) if repo_id: print('''Pushing to the hub...''' ) processor.push_to_hub(__lowerCamelCase ) model.push_to_hub(__lowerCamelCase ) if __name__ == "__main__": UpperCAmelCase__ = argparse.ArgumentParser() parser.add_argument( '--task', default='s2t', type=str, help='Type of the SpeechT5 model you\'d like to convert. Should be one of \'s2t\', \'t2s\', \'s2s\'.', ) parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to fairseq checkpoint') parser.add_argument('--vocab_path', default=None, type=str, help='Path to SentencePiece model') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') parser.add_argument( '--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.' ) parser.add_argument( '--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.' ) UpperCAmelCase__ = parser.parse_args() convert_speechta_checkpoint( args.task, args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.vocab_path, args.push_to_hub, )
288
1
"""simple docstring""" import sacrebleu as scb from packaging import version from sacrebleu import CHRF import datasets UpperCAmelCase__ = '\\n@inproceedings{popovic-2015-chrf,\n title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation",\n month = sep,\n year = "2015",\n address = "Lisbon, Portugal",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W15-3049",\n doi = "10.18653/v1/W15-3049",\n pages = "392--395",\n}\n@inproceedings{popovic-2017-chrf,\n title = "chr{F}++: words helping character n-grams",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Second Conference on Machine Translation",\n month = sep,\n year = "2017",\n address = "Copenhagen, Denmark",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W17-4770",\n doi = "10.18653/v1/W17-4770",\n pages = "612--618",\n}\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n' UpperCAmelCase__ = '\\nChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,\nand ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation\nthat is already present in sacrebleu.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. 
The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.\n' UpperCAmelCase__ = '\nProduces ChrF(++) scores for hypotheses given reference translations.\n\nArgs:\n predictions (list of str): The predicted sentences.\n references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.\n char_order (int): Character n-gram order. Defaults to `6`.\n word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.\n beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.\n lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.\n whitespace (bool): If `True`, include whitespaces when extracting character n-grams.\n eps_smoothing (bool): If `True`, applies epsilon smoothing similar\n to reference chrF++.py, NLTK and Moses implementations. If `False`,\n it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.\n\nReturns:\n \'score\' (float): The chrF (chrF++) score,\n \'char_order\' (int): The character n-gram order,\n \'word_order\' (int): The word n-gram order. 
If equals to 2, the metric is referred to as chrF++,\n \'beta\' (int): Determine the importance of recall w.r.t precision\n\nExamples:\n Example 1--a simple example of calculating chrF:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction, references=reference)\n >>> print(results)\n {\'score\': 84.64214891738334, \'char_order\': 6, \'word_order\': 0, \'beta\': 2}\n\n Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2)\n >>> print(results)\n {\'score\': 82.87263732906315, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n\n Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2,\n ... 
lowercase=True)\n >>> print(results)\n {\'score\': 92.12853119829202, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCAmelCase__ ( datasets.Metric ): def lowercase ( self : Any ): if version.parse(scb.__version__ ) < version.parse('''1.4.12''' ): raise ImportWarning( '''To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n''' '''You can install it with `pip install "sacrebleu>=1.4.12"`.''' ) return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/mjpost/sacreBLEU#chrf--chrf''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''string''' , id='''sequence''' ), '''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ), } ) , codebase_urls=['''https://github.com/mjpost/sacreBLEU#chrf--chrf'''] , reference_urls=[ '''https://github.com/m-popovic/chrF''', ] , ) def lowercase ( self : Tuple , _lowerCamelCase : Dict , _lowerCamelCase : Optional[Any] , _lowerCamelCase : int = CHRF.CHAR_ORDER , _lowerCamelCase : int = CHRF.WORD_ORDER , _lowerCamelCase : int = CHRF.BETA , _lowerCamelCase : bool = False , _lowerCamelCase : bool = False , _lowerCamelCase : bool = False , ): _snake_case = len(references[0] ) if any(len(_lowerCamelCase ) != references_per_prediction for refs in references ): raise ValueError('''Sacrebleu requires the same number of references for each prediction''' ) _snake_case = [[refs[i] for refs in references] for i in range(_lowerCamelCase )] _snake_case = CHRF(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) _snake_case = sb_chrf.corpus_score(_lowerCamelCase , _lowerCamelCase ) return { "score": output.score, "char_order": output.char_order, "word_order": 
output.word_order, "beta": output.beta, }
288
"""simple docstring""" import argparse import os import re import torch from flax.traverse_util import flatten_dict from tax import checkpoints from transformers import ( AutoTokenizer, PixaStructConfig, PixaStructForConditionalGeneration, PixaStructImageProcessor, PixaStructProcessor, PixaStructTextConfig, PixaStructVisionConfig, ) def _UpperCAmelCase ( __lowerCamelCase : Tuple ) -> Optional[int]: _snake_case = checkpoints.load_tax_checkpoint(__lowerCamelCase ) _snake_case = flatten_dict(__lowerCamelCase ) return flax_params def _UpperCAmelCase ( __lowerCamelCase : Dict ) -> Optional[int]: _snake_case = {} _snake_case = { '''token_embedder''': '''embeddings''', '''encoder_norm''': '''layernorm''', '''kernel''': '''weight''', '''.out''': '''.output''', '''scale''': '''weight''', '''embedders_0.pos_embedding''': '''row_embedder.weight''', '''embedders_1.pos_embedding''': '''column_embedder.weight''', } _snake_case = { '''query''': '''attention.query''', '''key''': '''attention.key''', '''value''': '''attention.value''', '''output.dense''': '''output''', '''encoder_decoder_attention.o''': '''encoder_decoder_attention.attention.o''', '''pre_self_attention_layer_norm''': '''self_attention.layer_norm''', '''pre_cross_attention_layer_norm''': '''encoder_decoder_attention.layer_norm''', '''mlp.''': '''mlp.DenseReluDense.''', '''pre_mlp_layer_norm''': '''mlp.layer_norm''', '''self_attention.o''': '''self_attention.attention.o''', '''decoder.embeddings.embedding''': '''decoder.embed_tokens.weight''', '''decoder.relpos_bias.rel_embedding''': '''decoder.layer.0.self_attention.attention.relative_attention_bias.weight''', '''decoder.decoder_norm.weight''': '''decoder.final_layer_norm.weight''', '''decoder.logits_dense.weight''': '''decoder.lm_head.weight''', } for key in flax_dict.keys(): if "target" in key: # remove the first prefix from the key _snake_case = '''.'''.join(key[1:] ) # rename the key for old, new in CONVERSION_MAPPING.items(): _snake_case = 
new_key.replace(__lowerCamelCase , __lowerCamelCase ) if "decoder" in new_key: for old, new in DECODER_CONVERSION_MAPPING.items(): _snake_case = new_key.replace(__lowerCamelCase , __lowerCamelCase ) if "layers" in new_key and "decoder" not in new_key: # use regex to replace the layer number _snake_case = re.sub(R'''layers_(\d+)''' , R'''layer.\1''' , __lowerCamelCase ) _snake_case = new_key.replace('''encoder''' , '''encoder.encoder''' ) elif "layers" in new_key and "decoder" in new_key: # use regex to replace the layer number _snake_case = re.sub(R'''layers_(\d+)''' , R'''layer.\1''' , __lowerCamelCase ) _snake_case = flax_dict[key] _snake_case = {} # convert converted_dict into torch format for key in converted_dict.keys(): if ("embed_tokens" not in key) and ("embedder" not in key): _snake_case = torch.from_numpy(converted_dict[key].T ) else: _snake_case = torch.from_numpy(converted_dict[key] ) return converted_torch_dict def _UpperCAmelCase ( __lowerCamelCase : List[str] , __lowerCamelCase : Dict , __lowerCamelCase : Any=False , __lowerCamelCase : Optional[int]=False ) -> int: _snake_case = get_flax_param(__lowerCamelCase ) if not use_large: _snake_case = PixaStructVisionConfig() _snake_case = PixaStructTextConfig() else: _snake_case = PixaStructVisionConfig( hidden_size=15_36 , d_ff=39_68 , num_attention_heads=24 , num_hidden_layers=18 ) _snake_case = PixaStructTextConfig(hidden_size=15_36 , d_ff=39_68 , num_heads=24 , num_layers=18 ) _snake_case = PixaStructConfig( vision_config=encoder_config.to_dict() , text_config=decoder_config.to_dict() , is_vqa=__lowerCamelCase ) _snake_case = PixaStructForConditionalGeneration(__lowerCamelCase ) _snake_case = rename_and_convert_flax_params(__lowerCamelCase ) model.load_state_dict(__lowerCamelCase ) _snake_case = AutoTokenizer.from_pretrained('''ybelkada/test-pix2struct-tokenizer''' ) _snake_case = PixaStructImageProcessor() _snake_case = PixaStructProcessor(image_processor=__lowerCamelCase , tokenizer=__lowerCamelCase ) 
if use_large: _snake_case = 40_96 _snake_case = True # mkdir if needed os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase ) model.save_pretrained(__lowerCamelCase ) processor.save_pretrained(__lowerCamelCase ) print('''Model saved in {}'''.format(__lowerCamelCase ) ) if __name__ == "__main__": UpperCAmelCase__ = argparse.ArgumentParser() parser.add_argument('--t5x_checkpoint_path', default=None, type=str, help='Path to the original T5x checkpoint.') parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--use_large', action='store_true', help='Use large model.') parser.add_argument('--is_vqa', action='store_true', help='Use large model.') UpperCAmelCase__ = parser.parse_args() convert_pixastruct_original_pytorch_checkpoint_to_hf( args.tax_checkpoint_path, args.pytorch_dump_folder_path, args.use_large )
288
1
"""simple docstring""" # This script creates a super tiny model that is useful inside tests, when we just want to test that # the machinery works, without needing to the check the quality of the outcomes. # # This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny - # all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and # emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files. # The latter is done by `fsmt-make-super-tiny-model.py`. # # It will be used then as "stas/tiny-wmt19-en-ru" from pathlib import Path import json import tempfile from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES UpperCAmelCase__ = 'tiny-wmt19-en-ru' # Build # borrowed from a test UpperCAmelCase__ = [ 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'w</w>', 'r</w>', 't</w>', 'lo', 'low', 'er</w>', 'low</w>', 'lowest</w>', 'newer</w>', 'wider</w>', '<unk>', ] UpperCAmelCase__ = dict(zip(vocab, range(len(vocab)))) UpperCAmelCase__ = ['l o 123', 'lo w 1456', 'e r</w> 1789', ''] with tempfile.TemporaryDirectory() as tmpdirname: UpperCAmelCase__ = Path(tmpdirname) UpperCAmelCase__ = build_dir / VOCAB_FILES_NAMES['src_vocab_file'] UpperCAmelCase__ = build_dir / VOCAB_FILES_NAMES['tgt_vocab_file'] UpperCAmelCase__ = build_dir / VOCAB_FILES_NAMES['merges_file'] with open(src_vocab_file, 'w') as fp: fp.write(json.dumps(vocab_tokens)) with open(tgt_vocab_file, 'w') as fp: fp.write(json.dumps(vocab_tokens)) with open(merges_file, 'w') as fp: fp.write('\n'.join(merges)) UpperCAmelCase__ = FSMTTokenizer( langs=['en', 'ru'], src_vocab_size=len(vocab), tgt_vocab_size=len(vocab), src_vocab_file=src_vocab_file, tgt_vocab_file=tgt_vocab_file, merges_file=merges_file, ) UpperCAmelCase__ = FSMTConfig( langs=['ru', 'en'], src_vocab_size=1000, tgt_vocab_size=1000, d_model=4, 
encoder_layers=1, decoder_layers=1, encoder_ffn_dim=4, decoder_ffn_dim=4, encoder_attention_heads=1, decoder_attention_heads=1, ) UpperCAmelCase__ = FSMTForConditionalGeneration(config) print(F"num of params {tiny_model.num_parameters()}") # Test UpperCAmelCase__ = tokenizer(['Making tiny model'], return_tensors='pt') UpperCAmelCase__ = tiny_model(**batch) print('test output:', len(outputs.logits[0])) # Save tiny_model.half() # makes it smaller tiny_model.save_pretrained(mname_tiny) tokenizer.save_pretrained(mname_tiny) print(F"Generated {mname_tiny}") # Upload # transformers-cli upload tiny-wmt19-en-ru
288
"""simple docstring""" from __future__ import annotations from bisect import bisect_left from functools import total_ordering from heapq import merge @total_ordering class lowerCAmelCase__ ( A_ ): def __lt__( self : Any , _lowerCamelCase : int ): return self[-1] < other[-1] def __eq__( self : int , _lowerCamelCase : Optional[Any] ): return self[-1] == other[-1] def _UpperCAmelCase ( __lowerCamelCase : list ) -> list: _snake_case = [] # sort into stacks for element in collection: _snake_case = Stack([element] ) _snake_case = bisect_left(__lowerCamelCase , __lowerCamelCase ) if i != len(__lowerCamelCase ): stacks[i].append(__lowerCamelCase ) else: stacks.append(__lowerCamelCase ) # use a heap-based merge to merge stack efficiently _snake_case = merge(*(reversed(__lowerCamelCase ) for stack in stacks) ) return collection if __name__ == "__main__": UpperCAmelCase__ = input('Enter numbers separated by a comma:\n').strip() UpperCAmelCase__ = [int(item) for item in user_input.split(',')] print(patience_sort(unsorted))
288
1
"""simple docstring""" from manim import * class lowerCAmelCase__ ( A_ ): def lowercase ( self : Any ): _snake_case = Rectangle(height=0.5 , width=0.5 ) _snake_case = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0 ) _snake_case = [mem.copy() for i in range(6 )] _snake_case = [mem.copy() for i in range(6 )] _snake_case = VGroup(*_lowerCamelCase ).arrange(_lowerCamelCase , buff=0 ) _snake_case = VGroup(*_lowerCamelCase ).arrange(_lowerCamelCase , buff=0 ) _snake_case = VGroup(_lowerCamelCase , _lowerCamelCase ).arrange(_lowerCamelCase , buff=0 ) _snake_case = Text('''CPU''' , font_size=24 ) _snake_case = Group(_lowerCamelCase , _lowerCamelCase ).arrange(_lowerCamelCase , buff=0.5 , aligned_edge=_lowerCamelCase ) cpu.move_to([-2.5, -0.5, 0] ) self.add(_lowerCamelCase ) _snake_case = [mem.copy() for i in range(1 )] _snake_case = VGroup(*_lowerCamelCase ).arrange(_lowerCamelCase , buff=0 ) _snake_case = Text('''GPU''' , font_size=24 ) _snake_case = Group(_lowerCamelCase , _lowerCamelCase ).arrange(_lowerCamelCase , buff=0.5 , aligned_edge=_lowerCamelCase ) gpu.align_to(_lowerCamelCase , _lowerCamelCase ) gpu.set_x(gpu.get_x() - 1 ) self.add(_lowerCamelCase ) _snake_case = [mem.copy() for i in range(6 )] _snake_case = VGroup(*_lowerCamelCase ).arrange(_lowerCamelCase , buff=0 ) _snake_case = Text('''Model''' , font_size=24 ) _snake_case = Group(_lowerCamelCase , _lowerCamelCase ).arrange(_lowerCamelCase , buff=0.5 , aligned_edge=_lowerCamelCase ) model.move_to([3, -1.0, 0] ) self.play( Create(_lowerCamelCase , run_time=1 ) , Create(_lowerCamelCase , run_time=1 ) , Create(_lowerCamelCase , run_time=1 ) , ) _snake_case = MarkupText( f'''First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.''' , font_size=24 , ) _snake_case = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) _snake_case = MarkupText( f'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , ) 
key_text.move_to([-5, 2.4, 0] ) step_a.move_to([2, 2, 0] ) self.play(Write(_lowerCamelCase , run_time=2.5 ) , Write(_lowerCamelCase ) , Write(_lowerCamelCase ) ) self.add(_lowerCamelCase ) _snake_case = [] _snake_case = [] _snake_case = [] for i, rect in enumerate(_lowerCamelCase ): _snake_case = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0.0 ).set_fill(_lowerCamelCase , opacity=0.7 ) cpu_target.move_to(_lowerCamelCase ) cpu_target.generate_target() _snake_case = 0.4_6 / 4 _snake_case = 0.4_6 / 3 if i == 0: cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.0_2 , direction=_lowerCamelCase ) cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 ) elif i == 3: cpu_target.target.next_to(cpu_targs[0].target , direction=_lowerCamelCase , buff=0.0 ) else: cpu_target.target.next_to(cpu_targs[i - 1].target , direction=_lowerCamelCase , buff=0.0 ) cpu_targs.append(_lowerCamelCase ) first_animations.append(rect.animate(run_time=0.5 ).set_stroke(_lowerCamelCase ) ) second_animations.append(MoveToTarget(_lowerCamelCase , run_time=1.5 ) ) self.play(*_lowerCamelCase ) self.play(*_lowerCamelCase ) self.wait()
288
"""simple docstring""" UpperCAmelCase__ = { 'Pillow': 'Pillow', 'accelerate': 'accelerate>=0.11.0', 'compel': 'compel==0.1.8', 'black': 'black~=23.1', 'datasets': 'datasets', 'filelock': 'filelock', 'flax': 'flax>=0.4.1', 'hf-doc-builder': 'hf-doc-builder>=0.3.0', 'huggingface-hub': 'huggingface-hub>=0.13.2', 'requests-mock': 'requests-mock==1.10.0', 'importlib_metadata': 'importlib_metadata', 'invisible-watermark': 'invisible-watermark', 'isort': 'isort>=5.5.4', 'jax': 'jax>=0.2.8,!=0.3.2', 'jaxlib': 'jaxlib>=0.1.65', 'Jinja2': 'Jinja2', 'k-diffusion': 'k-diffusion>=0.0.12', 'torchsde': 'torchsde', 'note_seq': 'note_seq', 'librosa': 'librosa', 'numpy': 'numpy', 'omegaconf': 'omegaconf', 'parameterized': 'parameterized', 'protobuf': 'protobuf>=3.20.3,<4', 'pytest': 'pytest', 'pytest-timeout': 'pytest-timeout', 'pytest-xdist': 'pytest-xdist', 'ruff': 'ruff>=0.0.241', 'safetensors': 'safetensors', 'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92', 'scipy': 'scipy', 'onnx': 'onnx', 'regex': 'regex!=2019.12.17', 'requests': 'requests', 'tensorboard': 'tensorboard', 'torch': 'torch>=1.4', 'torchvision': 'torchvision', 'transformers': 'transformers>=4.25.1', 'urllib3': 'urllib3<=2.0.0', }
288
1
"""simple docstring""" import argparse import os import re import packaging.version UpperCAmelCase__ = 'examples/' UpperCAmelCase__ = { 'examples': (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'), 'init': (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'), 'setup': (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'), 'doc': (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'), } UpperCAmelCase__ = { 'init': 'src/diffusers/__init__.py', 'setup': 'setup.py', } UpperCAmelCase__ = 'README.md' def _UpperCAmelCase ( __lowerCamelCase : Tuple , __lowerCamelCase : str , __lowerCamelCase : int ) -> Union[str, Any]: with open(__lowerCamelCase , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: _snake_case = f.read() _snake_case , _snake_case = REPLACE_PATTERNS[pattern] _snake_case = replace.replace('''VERSION''' , __lowerCamelCase ) _snake_case = re_pattern.sub(__lowerCamelCase , __lowerCamelCase ) with open(__lowerCamelCase , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f: f.write(__lowerCamelCase ) def _UpperCAmelCase ( __lowerCamelCase : List[Any] ) -> str: for folder, directories, fnames in os.walk(__lowerCamelCase ): # Removing some of the folders with non-actively maintained examples from the walk if "research_projects" in directories: directories.remove('''research_projects''' ) if "legacy" in directories: directories.remove('''legacy''' ) for fname in fnames: if fname.endswith('''.py''' ): update_version_in_file(os.path.join(__lowerCamelCase , __lowerCamelCase ) , __lowerCamelCase , pattern='''examples''' ) def _UpperCAmelCase ( __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[Any]=False ) -> Optional[Any]: for pattern, fname in REPLACE_FILES.items(): update_version_in_file(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) if not patch: 
update_version_in_examples(__lowerCamelCase ) def _UpperCAmelCase ( ) -> List[str]: _snake_case = '''🤗 Transformers currently provides the following architectures''' _snake_case = '''1. Want to contribute a new model?''' with open(__lowerCamelCase , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: _snake_case = f.readlines() # Find the start of the list. _snake_case = 0 while not lines[start_index].startswith(_start_prompt ): start_index += 1 start_index += 1 _snake_case = start_index # Update the lines in the model list. while not lines[index].startswith(_end_prompt ): if lines[index].startswith('''1.''' ): _snake_case = lines[index].replace( '''https://huggingface.co/docs/diffusers/main/model_doc''' , '''https://huggingface.co/docs/diffusers/model_doc''' , ) index += 1 with open(__lowerCamelCase , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f: f.writelines(__lowerCamelCase ) def _UpperCAmelCase ( ) -> Any: with open(REPLACE_FILES['''init'''] , '''r''' ) as f: _snake_case = f.read() _snake_case = REPLACE_PATTERNS['''init'''][0].search(__lowerCamelCase ).groups()[0] return packaging.version.parse(__lowerCamelCase ) def _UpperCAmelCase ( __lowerCamelCase : Union[str, Any]=False ) -> Union[str, Any]: _snake_case = get_version() if patch and default_version.is_devrelease: raise ValueError('''Can\'t create a patch version from the dev branch, checkout a released version!''' ) if default_version.is_devrelease: _snake_case = default_version.base_version elif patch: _snake_case = f'''{default_version.major}.{default_version.minor}.{default_version.micro + 1}''' else: _snake_case = f'''{default_version.major}.{default_version.minor + 1}.0''' # Now let's ask nicely if that's the right one. _snake_case = input(f'''Which version are you releasing? 
[{default_version}]''' ) if len(__lowerCamelCase ) == 0: _snake_case = default_version print(f'''Updating version to {version}.''' ) global_version_update(__lowerCamelCase , patch=__lowerCamelCase ) def _UpperCAmelCase ( ) -> str: _snake_case = get_version() _snake_case = f'''{current_version.major}.{current_version.minor + 1}.0.dev0''' _snake_case = current_version.base_version # Check with the user we got that right. _snake_case = input(f'''Which version are we developing now? [{dev_version}]''' ) if len(__lowerCamelCase ) == 0: _snake_case = dev_version print(f'''Updating version to {version}.''' ) global_version_update(__lowerCamelCase ) # print("Cleaning main README, don't forget to run `make fix-copies`.") # clean_main_ref_in_model_list() if __name__ == "__main__": UpperCAmelCase__ = argparse.ArgumentParser() parser.add_argument('--post_release', action='store_true', help='Whether this is pre or post release.') parser.add_argument('--patch', action='store_true', help='Whether or not this is a patch release.') UpperCAmelCase__ = parser.parse_args() if not args.post_release: pre_release_work(patch=args.patch) elif args.patch: print('Nothing to do after a patch :-)') else: post_release_work()
288
"""simple docstring""" from __future__ import annotations import inspect import unittest import numpy as np from transformers import ResNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFResNetForImageClassification, TFResNetModel from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class lowerCAmelCase__ : def __init__( self : Dict , _lowerCamelCase : int , _lowerCamelCase : Optional[int]=3 , _lowerCamelCase : List[str]=32 , _lowerCamelCase : Optional[int]=3 , _lowerCamelCase : Dict=10 , _lowerCamelCase : Tuple=[10, 20, 30, 40] , _lowerCamelCase : int=[1, 1, 2, 1] , _lowerCamelCase : int=True , _lowerCamelCase : Optional[int]=True , _lowerCamelCase : Optional[int]="relu" , _lowerCamelCase : List[Any]=3 , _lowerCamelCase : Dict=None , ): _snake_case = parent _snake_case = batch_size _snake_case = image_size _snake_case = num_channels _snake_case = embeddings_size _snake_case = hidden_sizes _snake_case = depths _snake_case = is_training _snake_case = use_labels _snake_case = hidden_act _snake_case = num_labels _snake_case = scope _snake_case = len(_lowerCamelCase ) def lowercase ( self : Optional[int] ): _snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _snake_case = None if self.use_labels: _snake_case = ids_tensor([self.batch_size] , self.num_labels ) _snake_case = self.get_config() return config, pixel_values, labels def lowercase ( self : Tuple ): return ResNetConfig( num_channels=self.num_channels , 
embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , ) def lowercase ( self : List[Any] , _lowerCamelCase : List[str] , _lowerCamelCase : str , _lowerCamelCase : List[Any] ): _snake_case = TFResNetModel(config=_lowerCamelCase ) _snake_case = model(_lowerCamelCase ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def lowercase ( self : Dict , _lowerCamelCase : str , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Tuple ): _snake_case = self.num_labels _snake_case = TFResNetForImageClassification(_lowerCamelCase ) _snake_case = model(_lowerCamelCase , labels=_lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowercase ( self : Tuple ): _snake_case = self.prepare_config_and_inputs() _snake_case , _snake_case , _snake_case = config_and_inputs _snake_case = {'''pixel_values''': pixel_values} return config, inputs_dict @require_tf class lowerCAmelCase__ ( A_ , A_ , unittest.TestCase ): __a = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else () __a = ( {"""feature-extraction""": TFResNetModel, """image-classification""": TFResNetForImageClassification} if is_tf_available() else {} ) __a = False __a = False __a = False __a = False __a = False def lowercase ( self : List[Any] ): _snake_case = TFResNetModelTester(self ) _snake_case = ConfigTester(self , config_class=_lowerCamelCase , has_text_modality=_lowerCamelCase ) def lowercase ( self : Tuple ): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() 
self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def lowercase ( self : List[Any] ): return @unittest.skip(reason='''ResNet does not use inputs_embeds''' ) def lowercase ( self : Any ): pass @unittest.skip(reason='''ResNet does not support input and output embeddings''' ) def lowercase ( self : List[str] ): pass def lowercase ( self : int ): _snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _snake_case = model_class(_lowerCamelCase ) _snake_case = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _snake_case = [*signature.parameters.keys()] _snake_case = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , _lowerCamelCase ) def lowercase ( self : List[str] ): _snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowerCamelCase ) def lowercase ( self : Union[str, Any] ): def check_hidden_states_output(_lowerCamelCase : int , _lowerCamelCase : List[Any] , _lowerCamelCase : str ): _snake_case = model_class(_lowerCamelCase ) _snake_case = model(**self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) ) _snake_case = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states _snake_case = self.model_tester.num_stages self.assertEqual(len(_lowerCamelCase ) , expected_num_stages + 1 ) # ResNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) _snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common() _snake_case = ['''basic''', '''bottleneck'''] for model_class in self.all_model_classes: for layer_type in layers_type: _snake_case = layer_type _snake_case = 
True check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _snake_case = True check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) def lowercase ( self : Union[str, Any] ): _snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_lowerCamelCase ) @slow def lowercase ( self : List[str] ): for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _snake_case = TFResNetModel.from_pretrained(_lowerCamelCase ) self.assertIsNotNone(_lowerCamelCase ) def _UpperCAmelCase ( ) -> Union[str, Any]: _snake_case = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_tf @require_vision class lowerCAmelCase__ ( unittest.TestCase ): @cached_property def lowercase ( self : Dict ): return ( AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def lowercase ( self : List[Any] ): _snake_case = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) _snake_case = self.default_image_processor _snake_case = prepare_img() _snake_case = image_processor(images=_lowerCamelCase , return_tensors='''tf''' ) # forward pass _snake_case = model(**_lowerCamelCase ) # verify the logits _snake_case = tf.TensorShape((1, 1000) ) self.assertEqual(outputs.logits.shape , _lowerCamelCase ) _snake_case = tf.constant([-1_1.1_0_6_9, -9.7_8_7_7, -8.3_7_7_7] ) self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , _lowerCamelCase , atol=1e-4 ) )
288
1
"""simple docstring""" from math import isqrt def _UpperCAmelCase ( __lowerCamelCase : int ) -> bool: return all(number % divisor != 0 for divisor in range(2 , isqrt(__lowerCamelCase ) + 1 ) ) def _UpperCAmelCase ( __lowerCamelCase : int = 10**6 ) -> int: _snake_case = 0 _snake_case = 1 _snake_case = 7 while prime_candidate < max_prime: primes_count += is_prime(__lowerCamelCase ) cube_index += 1 prime_candidate += 6 * cube_index return primes_count if __name__ == "__main__": print(F"{solution() = }")
288
"""simple docstring""" # This script creates a super tiny model that is useful inside tests, when we just want to test that # the machinery works, without needing to the check the quality of the outcomes. # # This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny - # all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and # emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files. # The latter is done by `fsmt-make-super-tiny-model.py`. # # It will be used then as "stas/tiny-wmt19-en-ru" from pathlib import Path import json import tempfile from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES UpperCAmelCase__ = 'tiny-wmt19-en-ru' # Build # borrowed from a test UpperCAmelCase__ = [ 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'w</w>', 'r</w>', 't</w>', 'lo', 'low', 'er</w>', 'low</w>', 'lowest</w>', 'newer</w>', 'wider</w>', '<unk>', ] UpperCAmelCase__ = dict(zip(vocab, range(len(vocab)))) UpperCAmelCase__ = ['l o 123', 'lo w 1456', 'e r</w> 1789', ''] with tempfile.TemporaryDirectory() as tmpdirname: UpperCAmelCase__ = Path(tmpdirname) UpperCAmelCase__ = build_dir / VOCAB_FILES_NAMES['src_vocab_file'] UpperCAmelCase__ = build_dir / VOCAB_FILES_NAMES['tgt_vocab_file'] UpperCAmelCase__ = build_dir / VOCAB_FILES_NAMES['merges_file'] with open(src_vocab_file, 'w') as fp: fp.write(json.dumps(vocab_tokens)) with open(tgt_vocab_file, 'w') as fp: fp.write(json.dumps(vocab_tokens)) with open(merges_file, 'w') as fp: fp.write('\n'.join(merges)) UpperCAmelCase__ = FSMTTokenizer( langs=['en', 'ru'], src_vocab_size=len(vocab), tgt_vocab_size=len(vocab), src_vocab_file=src_vocab_file, tgt_vocab_file=tgt_vocab_file, merges_file=merges_file, ) UpperCAmelCase__ = FSMTConfig( langs=['ru', 'en'], src_vocab_size=1000, tgt_vocab_size=1000, d_model=4, 
encoder_layers=1, decoder_layers=1, encoder_ffn_dim=4, decoder_ffn_dim=4, encoder_attention_heads=1, decoder_attention_heads=1, ) UpperCAmelCase__ = FSMTForConditionalGeneration(config) print(F"num of params {tiny_model.num_parameters()}") # Test UpperCAmelCase__ = tokenizer(['Making tiny model'], return_tensors='pt') UpperCAmelCase__ = tiny_model(**batch) print('test output:', len(outputs.logits[0])) # Save tiny_model.half() # makes it smaller tiny_model.save_pretrained(mname_tiny) tokenizer.save_pretrained(mname_tiny) print(F"Generated {mname_tiny}") # Upload # transformers-cli upload tiny-wmt19-en-ru
288
1
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase__ = logging.get_logger(__name__) UpperCAmelCase__ = { 'google/pegasus-large': 'https://huggingface.co/google/pegasus-large/resolve/main/config.json', # See all PEGASUS models at https://huggingface.co/models?filter=pegasus } class lowerCAmelCase__ ( A_ ): __a = """pegasus""" __a = ["""past_key_values"""] __a = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""} def __init__( self : int , _lowerCamelCase : Optional[Any]=50265 , _lowerCamelCase : Tuple=1024 , _lowerCamelCase : Optional[Any]=12 , _lowerCamelCase : Any=4096 , _lowerCamelCase : List[str]=16 , _lowerCamelCase : Optional[int]=12 , _lowerCamelCase : Any=4096 , _lowerCamelCase : str=16 , _lowerCamelCase : int=0.0 , _lowerCamelCase : int=0.0 , _lowerCamelCase : Union[str, Any]=True , _lowerCamelCase : Union[str, Any]=True , _lowerCamelCase : Optional[int]="gelu" , _lowerCamelCase : str=1024 , _lowerCamelCase : Any=0.1 , _lowerCamelCase : Any=0.0 , _lowerCamelCase : List[str]=0.0 , _lowerCamelCase : Optional[int]=0.0_2 , _lowerCamelCase : str=0 , _lowerCamelCase : Any=False , _lowerCamelCase : int=0 , _lowerCamelCase : Optional[int]=1 , _lowerCamelCase : Union[str, Any]=1 , **_lowerCamelCase : str , ): _snake_case = vocab_size _snake_case = max_position_embeddings _snake_case = d_model _snake_case = encoder_ffn_dim _snake_case = encoder_layers _snake_case = encoder_attention_heads _snake_case = decoder_ffn_dim _snake_case = decoder_layers _snake_case = decoder_attention_heads _snake_case = dropout _snake_case = attention_dropout _snake_case = activation_dropout _snake_case = activation_function _snake_case = init_std _snake_case = encoder_layerdrop _snake_case = decoder_layerdrop _snake_case = use_cache _snake_case = encoder_layers _snake_case = scale_embedding # scale factor will be sqrt(d_model) if True super().__init__( pad_token_id=_lowerCamelCase 
, eos_token_id=_lowerCamelCase , is_encoder_decoder=_lowerCamelCase , decoder_start_token_id=_lowerCamelCase , forced_eos_token_id=_lowerCamelCase , **_lowerCamelCase , ) @property def lowercase ( self : List[Any] ): return self.encoder_attention_heads @property def lowercase ( self : str ): return self.d_model
288
"""simple docstring""" def _UpperCAmelCase ( __lowerCamelCase : int = 1_00_00_00 ) -> int: _snake_case = limit + 1 _snake_case = [0] * limit for first_term in range(1 , __lowerCamelCase ): for n in range(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ): _snake_case = first_term + n / first_term if common_difference % 4: # d must be divisble by 4 continue else: common_difference /= 4 if ( first_term > common_difference and first_term < 4 * common_difference ): # since x,y,z are positive integers frequency[n] += 1 # so z>0 and a>d ,also 4d<a _snake_case = sum(1 for x in frequency[1:limit] if x == 10 ) return count if __name__ == "__main__": print(F"{solution() = }")
288
1
"""simple docstring""" import math def _UpperCAmelCase ( __lowerCamelCase : float , __lowerCamelCase : float ) -> float: if ( not isinstance(__lowerCamelCase , (int, float) ) or power_factor < -1 or power_factor > 1 ): raise ValueError('''power_factor must be a valid float value between -1 and 1.''' ) return apparent_power * power_factor def _UpperCAmelCase ( __lowerCamelCase : float , __lowerCamelCase : float ) -> float: if ( not isinstance(__lowerCamelCase , (int, float) ) or power_factor < -1 or power_factor > 1 ): raise ValueError('''power_factor must be a valid float value between -1 and 1.''' ) return apparent_power * math.sqrt(1 - power_factor**2 ) if __name__ == "__main__": import doctest doctest.testmod()
288
"""simple docstring""" import math import numpy as np import qiskit from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute def _UpperCAmelCase ( __lowerCamelCase : int = 3 ) -> qiskit.result.counts.Counts: if isinstance(__lowerCamelCase , __lowerCamelCase ): raise TypeError('''number of qubits must be a integer.''' ) if number_of_qubits <= 0: raise ValueError('''number of qubits must be > 0.''' ) if math.floor(__lowerCamelCase ) != number_of_qubits: raise ValueError('''number of qubits must be exact integer.''' ) if number_of_qubits > 10: raise ValueError('''number of qubits too large to simulate(>10).''' ) _snake_case = QuantumRegister(__lowerCamelCase , '''qr''' ) _snake_case = ClassicalRegister(__lowerCamelCase , '''cr''' ) _snake_case = QuantumCircuit(__lowerCamelCase , __lowerCamelCase ) _snake_case = number_of_qubits for i in range(__lowerCamelCase ): quantum_circuit.h(number_of_qubits - i - 1 ) counter -= 1 for j in range(__lowerCamelCase ): quantum_circuit.cp(np.pi / 2 ** (counter - j) , __lowerCamelCase , __lowerCamelCase ) for k in range(number_of_qubits // 2 ): quantum_circuit.swap(__lowerCamelCase , number_of_qubits - k - 1 ) # measure all the qubits quantum_circuit.measure(__lowerCamelCase , __lowerCamelCase ) # simulate with 10000 shots _snake_case = Aer.get_backend('''qasm_simulator''' ) _snake_case = execute(__lowerCamelCase , __lowerCamelCase , shots=1_00_00 ) return job.result().get_counts(__lowerCamelCase ) if __name__ == "__main__": print( F"Total count for quantum fourier transform state is: \ {quantum_fourier_transform(3)}" )
288
1
"""simple docstring""" # Logistic Regression from scratch # In[62]: # In[63]: # importing all the required libraries import numpy as np from matplotlib import pyplot as plt from sklearn import datasets def _UpperCAmelCase ( __lowerCamelCase : str ) -> List[Any]: return 1 / (1 + np.exp(-z )) def _UpperCAmelCase ( __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[str] ) -> Optional[Any]: return (-y * np.log(__lowerCamelCase ) - (1 - y) * np.log(1 - h )).mean() def _UpperCAmelCase ( __lowerCamelCase : Dict , __lowerCamelCase : Dict , __lowerCamelCase : Dict ) -> List[str]: _snake_case = np.dot(__lowerCamelCase , __lowerCamelCase ) return np.sum(y * scores - np.log(1 + np.exp(__lowerCamelCase ) ) ) def _UpperCAmelCase ( __lowerCamelCase : List[str] , __lowerCamelCase : List[str] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : str=7_00_00 ) -> Optional[Any]: _snake_case = np.zeros(x.shape[1] ) for iterations in range(__lowerCamelCase ): _snake_case = np.dot(__lowerCamelCase , __lowerCamelCase ) _snake_case = sigmoid_function(__lowerCamelCase ) _snake_case = np.dot(x.T , h - y ) / y.size _snake_case = theta - alpha * gradient # updating the weights _snake_case = np.dot(__lowerCamelCase , __lowerCamelCase ) _snake_case = sigmoid_function(__lowerCamelCase ) _snake_case = cost_function(__lowerCamelCase , __lowerCamelCase ) if iterations % 1_00 == 0: print(f'''loss: {j} \t''' ) # printing the loss after every 100 iterations return theta # In[68]: if __name__ == "__main__": UpperCAmelCase__ = datasets.load_iris() UpperCAmelCase__ = iris.data[:, :2] UpperCAmelCase__ = (iris.target != 0) * 1 UpperCAmelCase__ = 0.1 UpperCAmelCase__ = logistic_reg(alpha, x, y, max_iterations=70000) print('theta: ', theta) # printing the theta i.e our weights vector def _UpperCAmelCase ( __lowerCamelCase : Tuple ) -> Union[str, Any]: return sigmoid_function( np.dot(__lowerCamelCase , __lowerCamelCase ) ) # predicting the value of probability from the logistic regression 
algorithm plt.figure(figsize=(10, 6)) plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color='b', label='0') plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color='r', label='1') ((UpperCAmelCase__) , (UpperCAmelCase__)) = (x[:, 0].min(), x[:, 0].max()) ((UpperCAmelCase__) , (UpperCAmelCase__)) = (x[:, 1].min(), x[:, 1].max()) ((UpperCAmelCase__) , (UpperCAmelCase__)) = np.meshgrid(np.linspace(xa_min, xa_max), np.linspace(xa_min, xa_max)) UpperCAmelCase__ = np.c_[xxa.ravel(), xxa.ravel()] UpperCAmelCase__ = predict_prob(grid).reshape(xxa.shape) plt.contour(xxa, xxa, probs, [0.5], linewidths=1, colors='black') plt.legend() plt.show()
288
"""simple docstring""" import argparse import os from pathlib import Path import fairseq import torch from packaging import version from torch import nn from transformers import ( BartConfig, BartForConditionalGeneration, BartForSequenceClassification, BartModel, BartTokenizer, ) from transformers.utils import logging UpperCAmelCase__ = ['bart.large', 'bart.large.mnli', 'bart.large.cnn', 'bart_xsum/model.pt'] UpperCAmelCase__ = {'bart.large': BartModel, 'bart.large.mnli': BartForSequenceClassification} if version.parse(fairseq.__version__) < version.parse('0.9.0'): raise Exception('requires fairseq >= 0.9.0') logging.set_verbosity_info() UpperCAmelCase__ = logging.get_logger(__name__) UpperCAmelCase__ = ' Hello world! cécé herlolip' UpperCAmelCase__ = [ ('model.classification_heads.mnli.dense.weight', 'classification_head.dense.weight'), ('model.classification_heads.mnli.dense.bias', 'classification_head.dense.bias'), ('model.classification_heads.mnli.out_proj.weight', 'classification_head.out_proj.weight'), ('model.classification_heads.mnli.out_proj.bias', 'classification_head.out_proj.bias'), ] def _UpperCAmelCase ( __lowerCamelCase : Optional[Any] ) -> Optional[int]: _snake_case = [ '''encoder.version''', '''decoder.version''', '''model.encoder.version''', '''model.decoder.version''', '''_float_tensor''', ] for k in ignore_keys: state_dict.pop(__lowerCamelCase , __lowerCamelCase ) def _UpperCAmelCase ( __lowerCamelCase : List[Any] , __lowerCamelCase : int , __lowerCamelCase : int ) -> int: _snake_case = dct.pop(__lowerCamelCase ) _snake_case = val def _UpperCAmelCase ( __lowerCamelCase : Dict ) -> str: _snake_case = torch.load(__lowerCamelCase , map_location='''cpu''' ) _snake_case = torch.hub.load('''pytorch/fairseq''' , '''bart.large.cnn''' ).eval() hub_interface.model.load_state_dict(sd['''model'''] ) return hub_interface def _UpperCAmelCase ( __lowerCamelCase : Optional[int] ) -> Union[str, Any]: _snake_case , _snake_case = emb.weight.shape _snake_case = 
nn.Linear(__lowerCamelCase , __lowerCamelCase , bias=__lowerCamelCase ) _snake_case = emb.weight.data return lin_layer @torch.no_grad() def _UpperCAmelCase ( __lowerCamelCase : List[Any] , __lowerCamelCase : Tuple , __lowerCamelCase : Union[str, Any]=None ) -> List[Any]: if not os.path.exists(__lowerCamelCase ): _snake_case = torch.hub.load('''pytorch/fairseq''' , __lowerCamelCase ).eval() else: _snake_case = load_xsum_checkpoint(__lowerCamelCase ) bart.model.upgrade_state_dict(bart.model.state_dict() ) if hf_checkpoint_name is None: _snake_case = checkpoint_path.replace('''.''' , '''-''' ) _snake_case = BartConfig.from_pretrained(__lowerCamelCase ) _snake_case = bart.encode(__lowerCamelCase ).unsqueeze(0 ) _snake_case = BartTokenizer.from_pretrained(__lowerCamelCase ).encode(__lowerCamelCase , return_tensors='''pt''' ).unsqueeze(0 ) if not torch.eq(__lowerCamelCase , __lowerCamelCase ).all(): raise ValueError( f'''converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokensa}''' ) if checkpoint_path == "bart.large.mnli": _snake_case = bart.state_dict() remove_ignore_keys_(__lowerCamelCase ) _snake_case = state_dict['''model.decoder.embed_tokens.weight'''] for src, dest in mnli_rename_keys: rename_key(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) _snake_case = BartForSequenceClassification(__lowerCamelCase ).eval() model.load_state_dict(__lowerCamelCase ) _snake_case = bart.predict('''mnli''' , __lowerCamelCase , return_logits=__lowerCamelCase ) _snake_case = model(__lowerCamelCase )[0] # logits else: # no classification heads to worry about _snake_case = bart.model.state_dict() remove_ignore_keys_(__lowerCamelCase ) _snake_case = state_dict['''decoder.embed_tokens.weight'''] _snake_case = bart.extract_features(__lowerCamelCase ) if hf_checkpoint_name == "facebook/bart-large": _snake_case = BartModel(__lowerCamelCase ).eval() model.load_state_dict(__lowerCamelCase ) _snake_case = model(__lowerCamelCase ).model[0] else: 
_snake_case = BartForConditionalGeneration(__lowerCamelCase ).eval() # an existing summarization ckpt model.model.load_state_dict(__lowerCamelCase ) if hasattr(__lowerCamelCase , '''lm_head''' ): _snake_case = make_linear_from_emb(model.model.shared ) _snake_case = model.model(__lowerCamelCase )[0] # Check results if fairseq_output.shape != new_model_outputs.shape: raise ValueError( f'''`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}''' ) if (fairseq_output != new_model_outputs).any().item(): raise ValueError('''Some values in `fairseq_output` are different from `new_model_outputs`''' ) Path(__lowerCamelCase ).mkdir(exist_ok=__lowerCamelCase ) model.save_pretrained(__lowerCamelCase ) if __name__ == "__main__": UpperCAmelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( 'fairseq_path', type=str, help='bart.large, bart.large.cnn or a path to a model.pt on local filesystem.' ) parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument( '--hf_config', default=None, type=str, help='Which huggingface architecture to use: bart-large-xsum' ) UpperCAmelCase__ = parser.parse_args() convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
288
1
"""simple docstring""" import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor from transformers.utils import logging logging.set_verbosity_info() UpperCAmelCase__ = logging.get_logger(__name__) def _UpperCAmelCase ( __lowerCamelCase : Any , __lowerCamelCase : Union[str, Any]=False ) -> Optional[int]: _snake_case = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''deit.encoder.layer.{i}.layernorm_before.weight''') ) rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''deit.encoder.layer.{i}.layernorm_before.bias''') ) rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''deit.encoder.layer.{i}.attention.output.dense.weight''') ) rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''deit.encoder.layer.{i}.attention.output.dense.bias''') ) rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''deit.encoder.layer.{i}.layernorm_after.weight''') ) rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''deit.encoder.layer.{i}.layernorm_after.bias''') ) rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''deit.encoder.layer.{i}.intermediate.dense.weight''') ) rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''deit.encoder.layer.{i}.intermediate.dense.bias''') ) rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''deit.encoder.layer.{i}.output.dense.weight''') ) rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''deit.encoder.layer.{i}.output.dense.bias''') ) # projection layer + position embeddings rename_keys.extend( [ ('''cls_token''', '''deit.embeddings.cls_token'''), ('''dist_token''', '''deit.embeddings.distillation_token'''), ('''patch_embed.proj.weight''', 
'''deit.embeddings.patch_embeddings.projection.weight'''), ('''patch_embed.proj.bias''', '''deit.embeddings.patch_embeddings.projection.bias'''), ('''pos_embed''', '''deit.embeddings.position_embeddings'''), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ('''norm.weight''', '''layernorm.weight'''), ('''norm.bias''', '''layernorm.bias'''), ('''pre_logits.fc.weight''', '''pooler.dense.weight'''), ('''pre_logits.fc.bias''', '''pooler.dense.bias'''), ] ) # if just the base model, we should remove "deit" from all keys that start with "deit" _snake_case = [(pair[0], pair[1][4:]) if pair[1].startswith('''deit''' ) else pair for pair in rename_keys] else: # layernorm + classification heads rename_keys.extend( [ ('''norm.weight''', '''deit.layernorm.weight'''), ('''norm.bias''', '''deit.layernorm.bias'''), ('''head.weight''', '''cls_classifier.weight'''), ('''head.bias''', '''cls_classifier.bias'''), ('''head_dist.weight''', '''distillation_classifier.weight'''), ('''head_dist.bias''', '''distillation_classifier.bias'''), ] ) return rename_keys def _UpperCAmelCase ( __lowerCamelCase : int , __lowerCamelCase : List[str] , __lowerCamelCase : Tuple=False ) -> Tuple: for i in range(config.num_hidden_layers ): if base_model: _snake_case = '''''' else: _snake_case = '''deit.''' # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) _snake_case = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''' ) _snake_case = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict _snake_case = in_proj_weight[ : config.hidden_size, : ] _snake_case = in_proj_bias[: config.hidden_size] _snake_case = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] _snake_case = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] _snake_case = in_proj_weight[ -config.hidden_size :, : ] _snake_case = in_proj_bias[-config.hidden_size :] def _UpperCAmelCase ( __lowerCamelCase : str 
, __lowerCamelCase : Tuple , __lowerCamelCase : Tuple ) -> Tuple: _snake_case = dct.pop(__lowerCamelCase ) _snake_case = val def _UpperCAmelCase ( ) -> Dict: _snake_case = '''http://images.cocodataset.org/val2017/000000039769.jpg''' _snake_case = Image.open(requests.get(__lowerCamelCase , stream=__lowerCamelCase ).raw ) return im @torch.no_grad() def _UpperCAmelCase ( __lowerCamelCase : Union[str, Any] , __lowerCamelCase : str ) -> str: _snake_case = DeiTConfig() # all deit models have fine-tuned heads _snake_case = False # dataset (fine-tuned on ImageNet 2012), patch_size and image_size _snake_case = 10_00 _snake_case = '''huggingface/label-files''' _snake_case = '''imagenet-1k-id2label.json''' _snake_case = json.load(open(hf_hub_download(__lowerCamelCase , __lowerCamelCase , repo_type='''dataset''' ) , '''r''' ) ) _snake_case = {int(__lowerCamelCase ): v for k, v in idalabel.items()} _snake_case = idalabel _snake_case = {v: k for k, v in idalabel.items()} _snake_case = int(deit_name[-6:-4] ) _snake_case = int(deit_name[-3:] ) # size of the architecture if deit_name[9:].startswith('''tiny''' ): _snake_case = 1_92 _snake_case = 7_68 _snake_case = 12 _snake_case = 3 elif deit_name[9:].startswith('''small''' ): _snake_case = 3_84 _snake_case = 15_36 _snake_case = 12 _snake_case = 6 if deit_name[9:].startswith('''base''' ): pass elif deit_name[4:].startswith('''large''' ): _snake_case = 10_24 _snake_case = 40_96 _snake_case = 24 _snake_case = 16 # load original model from timm _snake_case = timm.create_model(__lowerCamelCase , pretrained=__lowerCamelCase ) timm_model.eval() # load state_dict of original model, remove and rename some keys _snake_case = timm_model.state_dict() _snake_case = create_rename_keys(__lowerCamelCase , __lowerCamelCase ) for src, dest in rename_keys: rename_key(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) read_in_q_k_v(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) # load HuggingFace model _snake_case = 
DeiTForImageClassificationWithTeacher(__lowerCamelCase ).eval() model.load_state_dict(__lowerCamelCase ) # Check outputs on an image, prepared by DeiTImageProcessor _snake_case = int( (2_56 / 2_24) * config.image_size ) # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103 _snake_case = DeiTImageProcessor(size=__lowerCamelCase , crop_size=config.image_size ) _snake_case = image_processor(images=prepare_img() , return_tensors='''pt''' ) _snake_case = encoding['''pixel_values'''] _snake_case = model(__lowerCamelCase ) _snake_case = timm_model(__lowerCamelCase ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(__lowerCamelCase , outputs.logits , atol=1E-3 ) Path(__lowerCamelCase ).mkdir(exist_ok=__lowerCamelCase ) print(f'''Saving model {deit_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(__lowerCamelCase ) print(f'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(__lowerCamelCase ) if __name__ == "__main__": UpperCAmelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( '--deit_name', default='vit_deit_base_distilled_patch16_224', type=str, help='Name of the DeiT timm model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) UpperCAmelCase__ = parser.parse_args() convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
288
"""simple docstring""" def _UpperCAmelCase ( __lowerCamelCase : Tuple ) -> Any: stooge(__lowerCamelCase , 0 , len(__lowerCamelCase ) - 1 ) return arr def _UpperCAmelCase ( __lowerCamelCase : str , __lowerCamelCase : List[str] , __lowerCamelCase : str ) -> int: if i >= h: return # If first element is smaller than the last then swap them if arr[i] > arr[h]: _snake_case , _snake_case = arr[h], arr[i] # If there are more than 2 elements in the array if h - i + 1 > 2: _snake_case = (int)((h - i + 1) / 3 ) # Recursively sort first 2/3 elements stooge(__lowerCamelCase , __lowerCamelCase , (h - t) ) # Recursively sort last 2/3 elements stooge(__lowerCamelCase , i + t , (__lowerCamelCase) ) # Recursively sort first 2/3 elements stooge(__lowerCamelCase , __lowerCamelCase , (h - t) ) if __name__ == "__main__": UpperCAmelCase__ = input('Enter numbers separated by a comma:\n').strip() UpperCAmelCase__ = [int(item) for item in user_input.split(',')] print(stooge_sort(unsorted))
288
1
"""simple docstring""" import argparse import requests import torch from PIL import Image from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel def _UpperCAmelCase ( __lowerCamelCase : Optional[int] ) -> str: # vision encoder if "img_encoder.pos_embed" in name: _snake_case = name.replace('''img_encoder.pos_embed''' , '''vision_model.embeddings.position_embeddings''' ) if "img_encoder.patch_embed.proj" in name: _snake_case = name.replace('''img_encoder.patch_embed.proj''' , '''vision_model.embeddings.patch_embeddings.projection''' ) if "img_encoder.patch_embed.norm" in name: _snake_case = name.replace('''img_encoder.patch_embed.norm''' , '''vision_model.embeddings.layernorm''' ) if "img_encoder.layers" in name: _snake_case = name.replace('''img_encoder.layers''' , '''vision_model.encoder.stages''' ) if "blocks" in name and "res" not in name: _snake_case = name.replace('''blocks''' , '''layers''' ) if "attn" in name and "pre_assign" not in name: _snake_case = name.replace('''attn''' , '''self_attn''' ) if "proj" in name and "self_attn" in name and "text" not in name: _snake_case = name.replace('''proj''' , '''out_proj''' ) if "pre_assign_attn.attn.proj" in name: _snake_case = name.replace('''pre_assign_attn.attn.proj''' , '''pre_assign_attn.attn.out_proj''' ) if "norm1" in name: _snake_case = name.replace('''norm1''' , '''layer_norm1''' ) if "norm2" in name and "pre_assign" not in name: _snake_case = name.replace('''norm2''' , '''layer_norm2''' ) if "img_encoder.norm" in name: _snake_case = name.replace('''img_encoder.norm''' , '''vision_model.layernorm''' ) # text encoder if "text_encoder.token_embedding" in name: _snake_case = name.replace('''text_encoder.token_embedding''' , '''text_model.embeddings.token_embedding''' ) if "text_encoder.positional_embedding" in name: _snake_case = name.replace('''text_encoder.positional_embedding''' , '''text_model.embeddings.position_embedding.weight''' ) if "text_encoder.transformer.resblocks." 
in name: _snake_case = name.replace('''text_encoder.transformer.resblocks.''' , '''text_model.encoder.layers.''' ) if "ln_1" in name: _snake_case = name.replace('''ln_1''' , '''layer_norm1''' ) if "ln_2" in name: _snake_case = name.replace('''ln_2''' , '''layer_norm2''' ) if "c_fc" in name: _snake_case = name.replace('''c_fc''' , '''fc1''' ) if "c_proj" in name: _snake_case = name.replace('''c_proj''' , '''fc2''' ) if "text_encoder" in name: _snake_case = name.replace('''text_encoder''' , '''text_model''' ) if "ln_final" in name: _snake_case = name.replace('''ln_final''' , '''final_layer_norm''' ) # projection layers if "img_projector.linear_hidden." in name: _snake_case = name.replace('''img_projector.linear_hidden.''' , '''visual_projection.''' ) if "img_projector.linear_out." in name: _snake_case = name.replace('''img_projector.linear_out.''' , '''visual_projection.3.''' ) if "text_projector.linear_hidden" in name: _snake_case = name.replace('''text_projector.linear_hidden''' , '''text_projection''' ) if "text_projector.linear_out" in name: _snake_case = name.replace('''text_projector.linear_out''' , '''text_projection.3''' ) return name def _UpperCAmelCase ( __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Union[str, Any] ) -> Tuple: for key in orig_state_dict.copy().keys(): _snake_case = orig_state_dict.pop(__lowerCamelCase ) if "qkv" in key: # weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment: # we need to split them up into separate matrices/vectors _snake_case = key.split('''.''' ) _snake_case , _snake_case = int(key_split[2] ), int(key_split[4] ) _snake_case = config.vision_config.hidden_size if "weight" in key: _snake_case = val[:dim, :] _snake_case = val[dim : dim * 2, :] _snake_case = val[-dim:, :] else: _snake_case = val[:dim] _snake_case = val[dim : dim * 2] _snake_case = val[-dim:] elif "in_proj" in key: # weights and biases of the key, value and query projections of text 
encoder's attention layers require special treatment: # we need to split them up into separate matrices/vectors _snake_case = key.split('''.''' ) _snake_case = int(key_split[3] ) _snake_case = config.text_config.hidden_size if "weight" in key: _snake_case = val[:dim, :] _snake_case = val[ dim : dim * 2, : ] _snake_case = val[-dim:, :] else: _snake_case = val[:dim] _snake_case = val[dim : dim * 2] _snake_case = val[-dim:] else: _snake_case = rename_key(__lowerCamelCase ) # squeeze if necessary if ( "text_projection.0" in new_name or "text_projection.3" in new_name or "visual_projection.0" in new_name or "visual_projection.3" in new_name ): _snake_case = val.squeeze_() else: _snake_case = val return orig_state_dict def _UpperCAmelCase ( ) -> List[str]: _snake_case = '''http://images.cocodataset.org/val2017/000000039769.jpg''' _snake_case = Image.open(requests.get(__lowerCamelCase , stream=__lowerCamelCase ).raw ) return im @torch.no_grad() def _UpperCAmelCase ( __lowerCamelCase : Tuple , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[Any]="groupvit-gcc-yfcc" , __lowerCamelCase : str=False ) -> int: _snake_case = GroupViTConfig() _snake_case = GroupViTModel(__lowerCamelCase ).eval() _snake_case = torch.load(__lowerCamelCase , map_location='''cpu''' )['''model'''] _snake_case = convert_state_dict(__lowerCamelCase , __lowerCamelCase ) _snake_case , _snake_case = model.load_state_dict(__lowerCamelCase , strict=__lowerCamelCase ) assert missing_keys == ["text_model.embeddings.position_ids"] assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(__lowerCamelCase ) == 0) # verify result _snake_case = CLIPProcessor.from_pretrained('''openai/clip-vit-base-patch32''' ) _snake_case = prepare_img() _snake_case = processor(text=['''a photo of a cat''', '''a photo of a dog'''] , images=__lowerCamelCase , padding=__lowerCamelCase , return_tensors='''pt''' ) with torch.no_grad(): _snake_case = model(**__lowerCamelCase ) if model_name == "groupvit-gcc-yfcc": 
_snake_case = torch.tensor([[13.3_523, 6.3_629]] ) elif model_name == "groupvit-gcc-redcaps": _snake_case = torch.tensor([[16.1_873, 8.6_230]] ) else: raise ValueError(f'''Model name {model_name} not supported.''' ) assert torch.allclose(outputs.logits_per_image , __lowerCamelCase , atol=1E-3 ) processor.save_pretrained(__lowerCamelCase ) model.save_pretrained(__lowerCamelCase ) print('''Successfully saved processor and model to''' , __lowerCamelCase ) if push_to_hub: print('''Pushing to the hub...''' ) processor.push_to_hub(__lowerCamelCase , organization='''nielsr''' ) model.push_to_hub(__lowerCamelCase , organization='''nielsr''' ) if __name__ == "__main__": UpperCAmelCase__ = argparse.ArgumentParser() parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to dump the processor and PyTorch model.' ) parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to GroupViT checkpoint') parser.add_argument( '--model_name', default='groupvit-gccy-fcc', type=str, help='Name of the model. Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'', ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.', ) UpperCAmelCase__ = parser.parse_args() convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
288
"""simple docstring""" # Logistic Regression from scratch # In[62]: # In[63]: # importing all the required libraries import numpy as np from matplotlib import pyplot as plt from sklearn import datasets def _UpperCAmelCase ( __lowerCamelCase : str ) -> List[Any]: return 1 / (1 + np.exp(-z )) def _UpperCAmelCase ( __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[str] ) -> Optional[Any]: return (-y * np.log(__lowerCamelCase ) - (1 - y) * np.log(1 - h )).mean() def _UpperCAmelCase ( __lowerCamelCase : Dict , __lowerCamelCase : Dict , __lowerCamelCase : Dict ) -> List[str]: _snake_case = np.dot(__lowerCamelCase , __lowerCamelCase ) return np.sum(y * scores - np.log(1 + np.exp(__lowerCamelCase ) ) ) def _UpperCAmelCase ( __lowerCamelCase : List[str] , __lowerCamelCase : List[str] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : str=7_00_00 ) -> Optional[Any]: _snake_case = np.zeros(x.shape[1] ) for iterations in range(__lowerCamelCase ): _snake_case = np.dot(__lowerCamelCase , __lowerCamelCase ) _snake_case = sigmoid_function(__lowerCamelCase ) _snake_case = np.dot(x.T , h - y ) / y.size _snake_case = theta - alpha * gradient # updating the weights _snake_case = np.dot(__lowerCamelCase , __lowerCamelCase ) _snake_case = sigmoid_function(__lowerCamelCase ) _snake_case = cost_function(__lowerCamelCase , __lowerCamelCase ) if iterations % 1_00 == 0: print(f'''loss: {j} \t''' ) # printing the loss after every 100 iterations return theta # In[68]: if __name__ == "__main__": UpperCAmelCase__ = datasets.load_iris() UpperCAmelCase__ = iris.data[:, :2] UpperCAmelCase__ = (iris.target != 0) * 1 UpperCAmelCase__ = 0.1 UpperCAmelCase__ = logistic_reg(alpha, x, y, max_iterations=70000) print('theta: ', theta) # printing the theta i.e our weights vector def _UpperCAmelCase ( __lowerCamelCase : Tuple ) -> Union[str, Any]: return sigmoid_function( np.dot(__lowerCamelCase , __lowerCamelCase ) ) # predicting the value of probability from the logistic regression 
algorithm plt.figure(figsize=(10, 6)) plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color='b', label='0') plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color='r', label='1') ((UpperCAmelCase__) , (UpperCAmelCase__)) = (x[:, 0].min(), x[:, 0].max()) ((UpperCAmelCase__) , (UpperCAmelCase__)) = (x[:, 1].min(), x[:, 1].max()) ((UpperCAmelCase__) , (UpperCAmelCase__)) = np.meshgrid(np.linspace(xa_min, xa_max), np.linspace(xa_min, xa_max)) UpperCAmelCase__ = np.c_[xxa.ravel(), xxa.ravel()] UpperCAmelCase__ = predict_prob(grid).reshape(xxa.shape) plt.contour(xxa, xxa, probs, [0.5], linewidths=1, colors='black') plt.legend() plt.show()
288
1
"""simple docstring""" def _UpperCAmelCase ( __lowerCamelCase : str ) -> bool: if not all(x.isalpha() for x in string ): raise ValueError('''String must only contain alphabetic characters.''' ) _snake_case = sorted(string.lower() ) return len(__lowerCamelCase ) == len(set(__lowerCamelCase ) ) if __name__ == "__main__": UpperCAmelCase__ = input('Enter a string ').strip() UpperCAmelCase__ = is_isogram(input_str) print(F"{input_str} is {'an' if isogram else 'not an'} isogram.")
288
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging UpperCAmelCase__ = logging.get_logger(__name__) UpperCAmelCase__ = {'vocab_file': 'sentencepiece.model'} UpperCAmelCase__ = { 'vocab_file': { 'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/sentencepiece.model', }, } UpperCAmelCase__ = { 'google/rembert': 256, } class lowerCAmelCase__ ( A_ ): __a = VOCAB_FILES_NAMES __a = PRETRAINED_VOCAB_FILES_MAP __a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self : Union[str, Any] , _lowerCamelCase : Any , _lowerCamelCase : Union[str, Any]=False , _lowerCamelCase : Any=True , _lowerCamelCase : Optional[Any]=True , _lowerCamelCase : int="[CLS]" , _lowerCamelCase : Optional[int]="[SEP]" , _lowerCamelCase : Optional[int]="[UNK]" , _lowerCamelCase : Optional[Any]="[SEP]" , _lowerCamelCase : str="[PAD]" , _lowerCamelCase : List[Any]="[CLS]" , _lowerCamelCase : Any="[MASK]" , **_lowerCamelCase : Optional[int] , ): super().__init__( do_lower_case=_lowerCamelCase , remove_space=_lowerCamelCase , keep_accents=_lowerCamelCase , bos_token=_lowerCamelCase , eos_token=_lowerCamelCase , unk_token=_lowerCamelCase , sep_token=_lowerCamelCase , pad_token=_lowerCamelCase , cls_token=_lowerCamelCase , mask_token=_lowerCamelCase , **_lowerCamelCase , ) _snake_case = do_lower_case _snake_case = remove_space _snake_case = keep_accents _snake_case = vocab_file _snake_case = spm.SentencePieceProcessor() self.sp_model.Load(_lowerCamelCase ) @property def lowercase ( self : int ): return len(self.sp_model ) def lowercase ( self : Any ): _snake_case = {self.convert_ids_to_tokens(_lowerCamelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : List[str] ): _snake_case = self.__dict__.copy() _snake_case = None return state def __setstate__( self : 
List[str] , _lowerCamelCase : Tuple ): _snake_case = d _snake_case = spm.SentencePieceProcessor() self.sp_model.Load(self.vocab_file ) def lowercase ( self : str , _lowerCamelCase : List[str] , _lowerCamelCase : Tuple=False ): _snake_case = self.sp_model.EncodeAsPieces(_lowerCamelCase ) return pieces def lowercase ( self : str , _lowerCamelCase : str ): return self.sp_model.PieceToId(_lowerCamelCase ) def lowercase ( self : List[str] , _lowerCamelCase : int ): return self.sp_model.IdToPiece(_lowerCamelCase ) def lowercase ( self : Union[str, Any] , _lowerCamelCase : Any ): _snake_case = self.sp_model.decode_pieces(_lowerCamelCase ) return out_string def lowercase ( self : Optional[Any] , _lowerCamelCase : List[int] , _lowerCamelCase : Optional[List[int]] = None ): _snake_case = [self.sep_token_id] _snake_case = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def lowercase ( self : Tuple , _lowerCamelCase : List[int] , _lowerCamelCase : Optional[List[int]] = None , _lowerCamelCase : bool = False ): if already_has_special_tokens: if token_ids_a is not None: raise ValueError( '''You should not supply a second sequence if the provided sequence of ''' '''ids is already formatted with special tokens for the model.''' ) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is not None: return [1] + ([0] * len(_lowerCamelCase )) + [1] + ([0] * len(_lowerCamelCase )) + [1] return [1] + ([0] * len(_lowerCamelCase )) + [1] def lowercase ( self : Optional[int] , _lowerCamelCase : List[int] , _lowerCamelCase : Optional[List[int]] = None ): _snake_case = [self.sep_token_id] _snake_case = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def lowercase ( self : List[str] , _lowerCamelCase : str , _lowerCamelCase : Optional[str] = None ): if not 
os.path.isdir(_lowerCamelCase ): logger.error('''Vocabulary path ({}) should be a directory'''.format(_lowerCamelCase ) ) return _snake_case = os.path.join( _lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCamelCase ): copyfile(self.vocab_file , _lowerCamelCase ) return (out_vocab_file,)
288
1
"""Fast (Rust-backed) tokenizer for BART, mirroring the slow ``BartTokenizer``."""
import json
from typing import List, Optional, Tuple

from tokenizers import pre_tokenizers, processors

from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

# See all BART models at https://huggingface.co/models?filter=bart
_BART_CHECKPOINTS = (
    "facebook/bart-base",
    "facebook/bart-large",
    "facebook/bart-large-mnli",
    "facebook/bart-large-cnn",
    "facebook/bart-large-xsum",
    "yjernite/bart_eli5",
)

# Every checkpoint uses the same resolve-URL layout, so build the maps instead
# of spelling out 18 nearly identical literal entries. Values are unchanged.
PRETRAINED_VOCAB_FILES_MAP = {
    file_key: {ckpt: f"https://huggingface.co/{ckpt}/resolve/main/{file_name}" for ckpt in _BART_CHECKPOINTS}
    for file_key, file_name in VOCAB_FILES_NAMES.items()
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {ckpt: 1024 for ckpt in _BART_CHECKPOINTS}


class BartTokenizerFast(PreTrainedTokenizerFast):
    """
    A "fast" BART tokenizer backed by the HuggingFace ``tokenizers`` library
    (byte-level BPE). Mirrors the slow tokenizer's special-token layout:
    ``<s> A </s>`` for one sequence and ``<s> A </s></s> B </s>`` for a pair.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BartTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        # If the serialized pre-tokenizer disagrees with the requested
        # `add_prefix_space`, rebuild it with the requested value.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # The pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`;
        # here we reconcile the post_processor the same way.
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cast to tuples for the
            # post-processor class constructor.
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self) -> str:
        """The mask token, or ``None`` (with an error log) if unset."""
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # BART's mask token behaves like a normal word: it absorbs the space
        # before it (lstrip=True) but not the one after (rstrip=False).
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the underlying BPE model files; returns the written paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """``<s> A </s>`` for one sequence, ``<s> A </s></s> B </s>`` for a pair."""
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """BART does not use token type ids: return all zeros over the full length."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
288
"""
Count the ways a number can be written as a sum of powers of *distinct*
natural numbers, e.g. 13 = 2**2 + 3**2 (Project Euler style).
"""


def backtrack(
    needed_sum: int,
    power: int,
    current_number: int,
    current_sum: int,
    solutions_count: int,
) -> tuple[int, int]:
    """
    Explore include/exclude choices for ``current_number ** power``.

    Returns ``(current_sum, solutions_count)`` so the caller can restore its
    running sum and accumulate the number of exact matches found so far.
    """
    if current_sum == needed_sum:
        # Exact match: record one solution for this include/exclude path.
        solutions_count += 1
        return current_sum, solutions_count

    # Integer exponentiation is exact; the previous float-based math.pow could
    # lose precision for large bases/exponents before the int() truncation.
    i_to_n = current_number**power
    if current_sum + i_to_n <= needed_sum:
        # Include current_number's power and continue with the next number.
        current_sum += i_to_n
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
        current_sum -= i_to_n
    if i_to_n < needed_sum:
        # Exclude current_number and try the next one (prune once the single
        # power already reaches needed_sum — larger bases only grow).
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
    return current_sum, solutions_count


def solve(needed_sum: int, power: int) -> int:
    """
    Return how many ways ``needed_sum`` can be expressed as a sum of distinct
    ``power``-th powers of natural numbers.

    >>> solve(13, 2)
    1
    >>> solve(100, 2)
    3

    Raises:
        ValueError: if ``needed_sum`` is outside [1, 1000] or ``power``
            is outside [2, 10].
    """
    if not (1 <= needed_sum <= 1000 and 2 <= power <= 10):
        raise ValueError(
            "Invalid input\n"
            "needed_sum must be between 1 and 1000, power between 2 and 10."
        )

    return backtrack(needed_sum, power, 1, 0, 0)[1]  # return the solutions_count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
288
1
"""simple docstring""" from __future__ import annotations import inspect import unittest from transformers import ViTConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFViTForImageClassification, TFViTModel if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class lowerCAmelCase__ : def __init__( self : int , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : List[Any]=13 , _lowerCamelCase : Optional[int]=30 , _lowerCamelCase : Optional[Any]=2 , _lowerCamelCase : Optional[int]=3 , _lowerCamelCase : List[Any]=True , _lowerCamelCase : List[str]=True , _lowerCamelCase : Union[str, Any]=32 , _lowerCamelCase : List[Any]=2 , _lowerCamelCase : List[str]=4 , _lowerCamelCase : int=37 , _lowerCamelCase : Any="gelu" , _lowerCamelCase : int=0.1 , _lowerCamelCase : Tuple=0.1 , _lowerCamelCase : Tuple=10 , _lowerCamelCase : Dict=0.0_2 , _lowerCamelCase : Optional[int]=3 , _lowerCamelCase : Dict=None , ): _snake_case = parent _snake_case = batch_size _snake_case = image_size _snake_case = patch_size _snake_case = num_channels _snake_case = is_training _snake_case = use_labels _snake_case = hidden_size _snake_case = num_hidden_layers _snake_case = num_attention_heads _snake_case = intermediate_size _snake_case = hidden_act _snake_case = hidden_dropout_prob _snake_case = attention_probs_dropout_prob _snake_case = type_sequence_label_size _snake_case = initializer_range _snake_case = scope # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) _snake_case = (image_size // patch_size) ** 2 _snake_case = num_patches + 1 def lowercase ( 
self : Union[str, Any] ): _snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _snake_case = None if self.use_labels: _snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _snake_case = self.get_config() return config, pixel_values, labels def lowercase ( self : Dict ): return ViTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_lowerCamelCase , initializer_range=self.initializer_range , ) def lowercase ( self : Tuple , _lowerCamelCase : List[str] , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Any ): _snake_case = TFViTModel(config=_lowerCamelCase ) _snake_case = model(_lowerCamelCase , training=_lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) # Test with an image with different size than the one specified in config. 
_snake_case = self.image_size // 2 _snake_case = pixel_values[:, :, :image_size, :image_size] _snake_case = model(_lowerCamelCase , interpolate_pos_encoding=_lowerCamelCase , training=_lowerCamelCase ) _snake_case = (image_size // self.patch_size) ** 2 + 1 self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size) ) def lowercase ( self : Union[str, Any] , _lowerCamelCase : List[Any] , _lowerCamelCase : Tuple , _lowerCamelCase : str ): _snake_case = self.type_sequence_label_size _snake_case = TFViTForImageClassification(_lowerCamelCase ) _snake_case = model(_lowerCamelCase , labels=_lowerCamelCase , training=_lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # Test with an image with different size than the one specified in config. _snake_case = self.image_size // 2 _snake_case = pixel_values[:, :, :image_size, :image_size] _snake_case = model(_lowerCamelCase , interpolate_pos_encoding=_lowerCamelCase , training=_lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images _snake_case = 1 _snake_case = TFViTForImageClassification(_lowerCamelCase ) _snake_case = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) _snake_case = model(_lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def lowercase ( self : Optional[Any] ): _snake_case = self.prepare_config_and_inputs() _snake_case , _snake_case , _snake_case = config_and_inputs _snake_case = {'''pixel_values''': pixel_values} return config, inputs_dict @require_tf class lowerCAmelCase__ ( A_ , A_ , unittest.TestCase ): __a = (TFViTModel, TFViTForImageClassification) if is_tf_available() else () __a = ( {"""feature-extraction""": TFViTModel, """image-classification""": TFViTForImageClassification} if is_tf_available() else {} ) __a = False __a = False 
__a = False def lowercase ( self : Union[str, Any] ): _snake_case = TFViTModelTester(self ) _snake_case = ConfigTester(self , config_class=_lowerCamelCase , has_text_modality=_lowerCamelCase , hidden_size=37 ) def lowercase ( self : Union[str, Any] ): self.config_tester.run_common_tests() @unittest.skip(reason='''ViT does not use inputs_embeds''' ) def lowercase ( self : int ): pass @unittest.skip(reason='''ViT does not use inputs_embeds''' ) def lowercase ( self : Optional[int] ): pass def lowercase ( self : str ): _snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _snake_case = model_class(_lowerCamelCase ) self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) ) _snake_case = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_lowerCamelCase , tf.keras.layers.Layer ) ) def lowercase ( self : List[str] ): _snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _snake_case = model_class(_lowerCamelCase ) _snake_case = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _snake_case = [*signature.parameters.keys()] _snake_case = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , _lowerCamelCase ) def lowercase ( self : Optional[Any] ): _snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowerCamelCase ) def lowercase ( self : Dict ): _snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_lowerCamelCase ) @slow def lowercase ( self : Tuple ): _snake_case = TFViTModel.from_pretrained('''google/vit-base-patch16-224''' ) self.assertIsNotNone(_lowerCamelCase ) def _UpperCAmelCase ( ) -> Optional[Any]: _snake_case = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image 
@require_tf @require_vision class lowerCAmelCase__ ( unittest.TestCase ): @cached_property def lowercase ( self : Optional[int] ): return ViTImageProcessor.from_pretrained('''google/vit-base-patch16-224''' ) if is_vision_available() else None @slow def lowercase ( self : Any ): _snake_case = TFViTForImageClassification.from_pretrained('''google/vit-base-patch16-224''' ) _snake_case = self.default_image_processor _snake_case = prepare_img() _snake_case = image_processor(images=_lowerCamelCase , return_tensors='''tf''' ) # forward pass _snake_case = model(**_lowerCamelCase ) # verify the logits _snake_case = tf.TensorShape((1, 1000) ) self.assertEqual(outputs.logits.shape , _lowerCamelCase ) _snake_case = tf.constant([-0.2_7_4_4, 0.8_2_1_5, -0.0_8_3_6] ) tf.debugging.assert_near(outputs.logits[0, :3] , _lowerCamelCase , atol=1e-4 )
288
"""Tests for CLIPProcessor: save/load round-trips and tokenizer/image-processor delegation."""
import json
import os
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import CLIPImageProcessor, CLIPProcessor


@require_vision
class CLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        """Write a toy BPE vocab/merges and an image-processor config to a temp dir."""
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return CLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Return a list with one random PIL image (channels-last after moveaxis)."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, CLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, CLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, CLIPImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
1
"""
Accelerate example: fine-tune BERT-base on GLUE/MRPC using LocalSGD, a method
that synchronizes model parameters every K batches (complementary to gradient
accumulation). Runs unchanged on single CPU/GPU, multi-GPU, TPU, fp16/fp32.
See https://github.com/huggingface/accelerate/tree/main/examples for usage.
"""
import argparse
import os

import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD


MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """
    Build the MRPC train/eval dataloaders.

    Args:
        accelerator: the active `Accelerator` (used for process ordering and
            to decide TPU-specific padding).
        batch_size: per-device train batch size.

    Returns:
        (train_dataloader, eval_dataloader)
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset,
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader


# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811


def training_function(config, args):
    """Run the full train + eval loop for the hyper-parameters in `config`."""
    # For testing only: shrink the run so CI finishes quickly.
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2

    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    local_sgd_steps = int(args.local_sgd_steps)

    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
        raise NotImplementedError("LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)")

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        with LocalSGD(
            accelerator=accelerator, model=model, local_sgd_steps=local_sgd_steps, enabled=local_sgd_steps is not None
        ) as local_sgd:
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                # New code #
                # We use the new `accumulate` context manager to perform gradient accumulation
                # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
                with accelerator.accumulate(model):
                    output = model(**batch)
                    loss = output.loss
                    accelerator.backward(loss)
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                    # LocalSGD-specific line
                    local_sgd.step()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)


def main():
    """Parse CLI arguments and launch the training run."""
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument(
        "--local_sgd_steps", type=int, default=8, help="Number of local SGD steps or None to disable local SGD"
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
288
"""Micro-benchmark: average onnxruntime latency of a BERT-style model.onnx."""
import os
import time

import numpy as np
import onnxruntime as ort


# NOTE(review): the original script assigned the string flags '1', '0', '1' to
# environment configuration whose variable names were lost in a refactor. The
# trio below is the standard TensorRT EP configuration matching those values —
# confirm against the deployment's original settings before relying on it.
os.environ["ORT_TENSORRT_INT8_ENABLE"] = "1"
os.environ["ORT_TENSORRT_INT8_USE_NATIVE_CALIBRATION_TABLE"] = "0"
os.environ["ORT_TENSORRT_ENGINE_CACHE_ENABLE"] = "1"

sess_opt = ort.SessionOptions()
# Disable all graph optimizations so the measurement reflects the raw graph.
sess_opt.graph_optimization_level = ort.GraphOptimizationLevel.ORT_DISABLE_ALL
print("Create inference session...")
execution_provider = ["TensorrtExecutionProvider", "CUDAExecutionProvider"]
sess = ort.InferenceSession("model.onnx", sess_options=sess_opt, providers=execution_provider)
run_opt = ort.RunOptions()

sequence = 128
batch = 1
# All-ones dummy inputs in the (batch, sequence) layout BERT-style models expect.
input_ids = np.ones((batch, sequence), dtype=np.int64)
attention_mask = np.ones((batch, sequence), dtype=np.int64)
token_type_ids = np.ones((batch, sequence), dtype=np.int64)

print("Warm up phase...")
sess.run(
    None,
    {
        sess.get_inputs()[0].name: input_ids,
        sess.get_inputs()[1].name: attention_mask,
        sess.get_inputs()[2].name: token_type_ids,
    },
    run_options=run_opt,
)

print("Start inference...")
start_time = time.time()
max_iters = 2000
for _ in range(max_iters):
    # Outputs are discarded: only wall-clock latency matters here.
    sess.run(
        None,
        {
            sess.get_inputs()[0].name: input_ids,
            sess.get_inputs()[1].name: attention_mask,
            sess.get_inputs()[2].name: token_type_ids,
        },
        run_options=run_opt,
    )
print("Average Inference Time = {:.3f} ms".format((time.time() - start_time) * 1000 / max_iters))
288
1
"""Tests for the Marian (OPUS-MT) SentencePiece tokenizer."""
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile

from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available


if is_sentencepiece_available():
    from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")

mock_tokenizer_config = {"target_lang": "fi", "source_lang": "en"}
zh_code = ">>zh<<"
ORG_NAME = "Helsinki-NLP/"

# Pick a tensor framework for return_tensors= based on what is installed.
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"


@require_sentencepiece
class MarianTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MarianTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        """Assemble a toy Marian vocab + spm files in the temp dir."""
        super().setUp()
        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab"])
        save_json(mock_tokenizer_config, save_dir / VOCAB_FILES_NAMES["tokenizer_config_file"])
        if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["source_spm"])
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["target_spm"])

        tokenizer = MarianTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs) -> MarianTokenizer:
        return MarianTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return (
            "This is a test",
            "This is a test",
        )

    def test_convert_token_and_id(self):
        """Token </s> maps to id 0 and back."""
        token = "</s>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "</s>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 9)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 9)

    def test_tokenizer_equivalence_en_de(self):
        en_de_tokenizer = MarianTokenizer.from_pretrained(f"{ORG_NAME}opus-mt-en-de")
        batch = en_de_tokenizer(["I am a small frog"], return_tensors=None)
        self.assertIsInstance(batch, BatchEncoding)
        expected = [38, 121, 14, 697, 38848, 0]
        self.assertListEqual(expected, batch.input_ids[0])

        save_dir = tempfile.mkdtemp()
        en_de_tokenizer.save_pretrained(save_dir)
        contents = [x.name for x in Path(save_dir).glob("*")]
        self.assertIn("source.spm", contents)
        MarianTokenizer.from_pretrained(save_dir)

    def test_outputs_not_longer_than_maxlen(self):
        tok = self.get_tokenizer()

        batch = tok(
            ["I am a small frog" * 1000, "I am a small frog"], padding=True, truncation=True, return_tensors=FRAMEWORK
        )
        self.assertIsInstance(batch, BatchEncoding)
        # Truncated to the model max length.
        self.assertEqual(batch.input_ids.shape, (2, 512))

    def test_outputs_can_be_shorter(self):
        tok = self.get_tokenizer()
        batch_smaller = tok(["I am a tiny frog", "I am a small frog"], padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch_smaller, BatchEncoding)
        self.assertEqual(batch_smaller.input_ids.shape, (2, 10))

    @slow
    def test_tokenizer_integration(self):
        # NOTE(review): the reference batch was originally spelled out as three
        # fully padded literal lists; it is rebuilt here from the unpadded token
        # runs, which is value-identical since the batch is right-padded with
        # pad_id 58100 to the length of the longest sequence.
        # fmt: off
        ids_0 = [43495, 462, 20, 42164, 1369, 52, 464, 132, 1703, 492, 13, 7491, 38999, 6, 8, 464, 132, 1703, 492, 13, 4669, 37867, 13, 7525, 27, 1593, 988, 13, 33972, 7029, 6, 20, 8251, 383, 2, 270, 5866, 3788, 2, 2353, 8251, 12338, 2, 13958, 387, 2, 3629, 6953, 188, 2900, 2, 13958, 8011, 11501, 23, 8460, 4073, 34009, 20, 435, 11439, 27, 8, 8460, 4073, 6004, 20, 9988, 375, 27, 33, 266, 1945, 1076, 1350, 37867, 3288, 5, 577, 1076, 4374, 8, 5082, 5, 26453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 10767, 6, 316, 304, 4239, 3, 0]
        ids_1 = [148, 15722, 19, 1839, 12, 1350, 13, 22327, 5082, 5418, 47567, 35938, 59, 318, 19552, 108, 2183, 54, 14976, 4835, 32, 547, 1114, 8, 315, 2417, 5, 92, 19088, 3, 0]
        ids_2 = [36, 6395, 12570, 39147, 11597, 6, 266, 4, 45405, 7296, 3, 0]
        # fmt: on
        pad_id = 58100
        total = len(ids_0)  # longest sequence sets the padded batch length
        expected_encoding = {
            "input_ids": [
                ids_0,
                ids_1 + [pad_id] * (total - len(ids_1)),
                ids_2 + [pad_id] * (total - len(ids_2)),
            ],
            "attention_mask": [
                [1] * total,
                [1] * len(ids_1) + [0] * (total - len(ids_1)),
                [1] * len(ids_2) + [0] * (total - len(ids_2)),
            ],
        }  # noqa: E501

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="Helsinki-NLP/opus-mt-en-de",
            revision="1a8c2263da11e68e50938f97e10cd57820bd504c",
            decode_kwargs={"use_source_tokenizer": True},
        )

    def test_tokenizer_integration_seperate_vocabs(self):
        tokenizer = MarianTokenizer.from_pretrained("hf-internal-testing/test-marian-two-vocabs")

        source_text = "Tämä on testi"
        target_text = "This is a test"

        expected_src_ids = [76, 7, 2047, 2]
        expected_target_ids = [69, 12, 11, 940, 2]

        src_ids = tokenizer(source_text).input_ids
        self.assertListEqual(src_ids, expected_src_ids)

        target_ids = tokenizer(text_target=target_text).input_ids
        self.assertListEqual(target_ids, expected_target_ids)

        decoded = tokenizer.decode(target_ids, skip_special_tokens=True)
        self.assertEqual(decoded, target_text)
288
"""simple docstring""" import logging from transformers.configuration_utils import PretrainedConfig UpperCAmelCase__ = logging.getLogger(__name__) class lowerCAmelCase__ ( A_ ): __a = """masked_bert""" def __init__( self : Union[str, Any] , _lowerCamelCase : Any=30522 , _lowerCamelCase : Union[str, Any]=768 , _lowerCamelCase : Tuple=12 , _lowerCamelCase : Any=12 , _lowerCamelCase : str=3072 , _lowerCamelCase : str="gelu" , _lowerCamelCase : int=0.1 , _lowerCamelCase : Optional[int]=0.1 , _lowerCamelCase : Dict=512 , _lowerCamelCase : List[Any]=2 , _lowerCamelCase : int=0.0_2 , _lowerCamelCase : Union[str, Any]=1e-12 , _lowerCamelCase : Union[str, Any]=0 , _lowerCamelCase : List[str]="topK" , _lowerCamelCase : Optional[Any]="constant" , _lowerCamelCase : Optional[Any]=0.0 , **_lowerCamelCase : str , ): super().__init__(pad_token_id=_lowerCamelCase , **_lowerCamelCase ) _snake_case = vocab_size _snake_case = hidden_size _snake_case = num_hidden_layers _snake_case = num_attention_heads _snake_case = hidden_act _snake_case = intermediate_size _snake_case = hidden_dropout_prob _snake_case = attention_probs_dropout_prob _snake_case = max_position_embeddings _snake_case = type_vocab_size _snake_case = initializer_range _snake_case = layer_norm_eps _snake_case = pruning_method _snake_case = mask_init _snake_case = mask_scale
288
1
"""simple docstring""" import tempfile import torch from diffusers import ( DEISMultistepScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, UniPCMultistepScheduler, ) from .test_schedulers import SchedulerCommonTest class lowerCAmelCase__ ( A_ ): __a = (UniPCMultistepScheduler,) __a = (("""num_inference_steps""", 25),) def lowercase ( self : Dict , **_lowerCamelCase : str ): _snake_case = { '''num_train_timesteps''': 1000, '''beta_start''': 0.0_0_0_1, '''beta_end''': 0.0_2, '''beta_schedule''': '''linear''', '''solver_order''': 2, '''solver_type''': '''bh2''', } config.update(**_lowerCamelCase ) return config def lowercase ( self : str , _lowerCamelCase : List[Any]=0 , **_lowerCamelCase : List[Any] ): _snake_case = dict(self.forward_default_kwargs ) _snake_case = kwargs.pop('''num_inference_steps''' , _lowerCamelCase ) _snake_case = self.dummy_sample _snake_case = 0.1 * sample _snake_case = [residual + 0.2, residual + 0.1_5, residual + 0.1_0] for scheduler_class in self.scheduler_classes: _snake_case = self.get_scheduler_config(**_lowerCamelCase ) _snake_case = scheduler_class(**_lowerCamelCase ) scheduler.set_timesteps(_lowerCamelCase ) # copy over dummy past residuals _snake_case = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(_lowerCamelCase ) _snake_case = scheduler_class.from_pretrained(_lowerCamelCase ) new_scheduler.set_timesteps(_lowerCamelCase ) # copy over dummy past residuals _snake_case = dummy_past_residuals[: new_scheduler.config.solver_order] _snake_case , _snake_case = sample, sample for t in range(_lowerCamelCase , time_step + scheduler.config.solver_order + 1 ): _snake_case = scheduler.step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , **_lowerCamelCase ).prev_sample _snake_case = new_scheduler.step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , **_lowerCamelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, 
"Scheduler outputs are not identical" def lowercase ( self : str , _lowerCamelCase : List[Any]=0 , **_lowerCamelCase : List[str] ): _snake_case = dict(self.forward_default_kwargs ) _snake_case = kwargs.pop('''num_inference_steps''' , _lowerCamelCase ) _snake_case = self.dummy_sample _snake_case = 0.1 * sample _snake_case = [residual + 0.2, residual + 0.1_5, residual + 0.1_0] for scheduler_class in self.scheduler_classes: _snake_case = self.get_scheduler_config() _snake_case = scheduler_class(**_lowerCamelCase ) scheduler.set_timesteps(_lowerCamelCase ) # copy over dummy past residuals (must be after setting timesteps) _snake_case = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(_lowerCamelCase ) _snake_case = scheduler_class.from_pretrained(_lowerCamelCase ) # copy over dummy past residuals new_scheduler.set_timesteps(_lowerCamelCase ) # copy over dummy past residual (must be after setting timesteps) _snake_case = dummy_past_residuals[: new_scheduler.config.solver_order] _snake_case = scheduler.step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , **_lowerCamelCase ).prev_sample _snake_case = new_scheduler.step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , **_lowerCamelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def lowercase ( self : int , _lowerCamelCase : str=None , **_lowerCamelCase : List[Any] ): if scheduler is None: _snake_case = self.scheduler_classes[0] _snake_case = self.get_scheduler_config(**_lowerCamelCase ) _snake_case = scheduler_class(**_lowerCamelCase ) _snake_case = self.scheduler_classes[0] _snake_case = self.get_scheduler_config(**_lowerCamelCase ) _snake_case = scheduler_class(**_lowerCamelCase ) _snake_case = 10 _snake_case = self.dummy_model() _snake_case = self.dummy_sample_deter scheduler.set_timesteps(_lowerCamelCase ) for i, t in enumerate(scheduler.timesteps ): _snake_case = 
model(_lowerCamelCase , _lowerCamelCase ) _snake_case = scheduler.step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ).prev_sample return sample def lowercase ( self : Tuple ): _snake_case = dict(self.forward_default_kwargs ) _snake_case = kwargs.pop('''num_inference_steps''' , _lowerCamelCase ) for scheduler_class in self.scheduler_classes: _snake_case = self.get_scheduler_config() _snake_case = scheduler_class(**_lowerCamelCase ) _snake_case = self.dummy_sample _snake_case = 0.1 * sample if num_inference_steps is not None and hasattr(_lowerCamelCase , '''set_timesteps''' ): scheduler.set_timesteps(_lowerCamelCase ) elif num_inference_steps is not None and not hasattr(_lowerCamelCase , '''set_timesteps''' ): _snake_case = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) _snake_case = [residual + 0.2, residual + 0.1_5, residual + 0.1_0] _snake_case = dummy_past_residuals[: scheduler.config.solver_order] _snake_case = scheduler.timesteps[5] _snake_case = scheduler.timesteps[6] _snake_case = scheduler.step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , **_lowerCamelCase ).prev_sample _snake_case = scheduler.step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , **_lowerCamelCase ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def lowercase ( self : Dict ): # make sure that iterating over schedulers with same config names gives same results # for defaults _snake_case = UniPCMultistepScheduler(**self.get_scheduler_config() ) _snake_case = self.full_loop(scheduler=_lowerCamelCase ) _snake_case = torch.mean(torch.abs(_lowerCamelCase ) ) assert abs(result_mean.item() - 0.2_4_6_4 ) < 1e-3 _snake_case = DPMSolverSinglestepScheduler.from_config(scheduler.config ) _snake_case = DEISMultistepScheduler.from_config(scheduler.config ) _snake_case = DPMSolverMultistepScheduler.from_config(scheduler.config ) _snake_case = 
UniPCMultistepScheduler.from_config(scheduler.config ) _snake_case = self.full_loop(scheduler=_lowerCamelCase ) _snake_case = torch.mean(torch.abs(_lowerCamelCase ) ) assert abs(result_mean.item() - 0.2_4_6_4 ) < 1e-3 def lowercase ( self : Union[str, Any] ): for timesteps in [25, 50, 100, 999, 1000]: self.check_over_configs(num_train_timesteps=_lowerCamelCase ) def lowercase ( self : Optional[Any] ): self.check_over_configs(thresholding=_lowerCamelCase ) for order in [1, 2, 3]: for solver_type in ["bh1", "bh2"]: for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( thresholding=_lowerCamelCase , prediction_type=_lowerCamelCase , sample_max_value=_lowerCamelCase , solver_order=_lowerCamelCase , solver_type=_lowerCamelCase , ) def lowercase ( self : Tuple ): for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=_lowerCamelCase ) def lowercase ( self : Any ): for solver_type in ["bh1", "bh2"]: for order in [1, 2, 3]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( solver_order=_lowerCamelCase , solver_type=_lowerCamelCase , prediction_type=_lowerCamelCase , ) _snake_case = self.full_loop( solver_order=_lowerCamelCase , solver_type=_lowerCamelCase , prediction_type=_lowerCamelCase , ) assert not torch.isnan(_lowerCamelCase ).any(), "Samples have nan numbers" def lowercase ( self : Union[str, Any] ): self.check_over_configs(lower_order_final=_lowerCamelCase ) self.check_over_configs(lower_order_final=_lowerCamelCase ) def lowercase ( self : List[str] ): for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]: self.check_over_forward(num_inference_steps=_lowerCamelCase , time_step=0 ) def lowercase ( self : Optional[int] ): _snake_case = self.full_loop() _snake_case = torch.mean(torch.abs(_lowerCamelCase ) ) assert abs(result_mean.item() - 0.2_4_6_4 ) < 1e-3 def lowercase ( self : Dict ): _snake_case = self.full_loop(prediction_type='''v_prediction''' 
) _snake_case = torch.mean(torch.abs(_lowerCamelCase ) ) assert abs(result_mean.item() - 0.1_0_1_4 ) < 1e-3 def lowercase ( self : Optional[Any] ): _snake_case = self.scheduler_classes[0] _snake_case = self.get_scheduler_config(thresholding=_lowerCamelCase , dynamic_thresholding_ratio=0 ) _snake_case = scheduler_class(**_lowerCamelCase ) _snake_case = 10 _snake_case = self.dummy_model() _snake_case = self.dummy_sample_deter.half() scheduler.set_timesteps(_lowerCamelCase ) for i, t in enumerate(scheduler.timesteps ): _snake_case = model(_lowerCamelCase , _lowerCamelCase ) _snake_case = scheduler.step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ).prev_sample assert sample.dtype == torch.floataa def lowercase ( self : Union[str, Any] , **_lowerCamelCase : List[str] ): for scheduler_class in self.scheduler_classes: _snake_case = self.get_scheduler_config(**_lowerCamelCase ) _snake_case = scheduler_class(**_lowerCamelCase ) scheduler.set_timesteps(scheduler.config.num_train_timesteps ) assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps
288
"""simple docstring""" import os import posixpath import uuid from dataclasses import dataclass from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union import numpy as np import pyarrow as pa import datasets from datasets.arrow_writer import ArrowWriter, ParquetWriter from datasets.config import MAX_SHARD_SIZE from datasets.filesystems import ( is_remote_filesystem, rename, ) from datasets.iterable_dataset import _BaseExamplesIterable from datasets.utils.py_utils import convert_file_size_to_int UpperCAmelCase__ = datasets.utils.logging.get_logger(__name__) if TYPE_CHECKING: import pyspark @dataclass class lowerCAmelCase__ ( datasets.BuilderConfig ): __a = None def _UpperCAmelCase ( __lowerCamelCase : "pyspark.sql.DataFrame" , __lowerCamelCase : List[int] , ) -> Optional[int]: import pyspark def generate_fn(): _snake_case = df.select('''*''' , pyspark.sql.functions.spark_partition_id().alias('''part_id''' ) ) for partition_id in partition_order: _snake_case = df_with_partition_id.select('''*''' ).where(f'''part_id = {partition_id}''' ).drop('''part_id''' ) _snake_case = partition_df.collect() _snake_case = 0 for row in rows: yield f'''{partition_id}_{row_id}''', row.asDict() row_id += 1 return generate_fn class lowerCAmelCase__ ( _BaseExamplesIterable ): def __init__( self : Optional[int] , _lowerCamelCase : "pyspark.sql.DataFrame" , _lowerCamelCase : List[Any]=None , ): _snake_case = df _snake_case = partition_order or range(self.df.rdd.getNumPartitions() ) _snake_case = _generate_iterable_examples(self.df , self.partition_order ) def __iter__( self : Optional[int] ): yield from self.generate_examples_fn() def lowercase ( self : Any , _lowerCamelCase : np.random.Generator ): _snake_case = list(range(self.df.rdd.getNumPartitions() ) ) generator.shuffle(_lowerCamelCase ) return SparkExamplesIterable(self.df , partition_order=_lowerCamelCase ) def lowercase ( self : List[Any] , _lowerCamelCase : int , _lowerCamelCase : int ): _snake_case = 
self.split_shard_indices_by_worker(_lowerCamelCase , _lowerCamelCase ) return SparkExamplesIterable(self.df , partition_order=_lowerCamelCase ) @property def lowercase ( self : List[str] ): return len(self.partition_order ) class lowerCAmelCase__ ( datasets.DatasetBuilder ): __a = SparkConfig def __init__( self : str , _lowerCamelCase : "pyspark.sql.DataFrame" , _lowerCamelCase : str = None , _lowerCamelCase : str = None , **_lowerCamelCase : List[str] , ): import pyspark _snake_case = pyspark.sql.SparkSession.builder.getOrCreate() _snake_case = df _snake_case = working_dir super().__init__( cache_dir=_lowerCamelCase , config_name=str(self.df.semanticHash() ) , **_lowerCamelCase , ) def lowercase ( self : str ): # Returns the path of the created file. def create_cache_and_write_probe(_lowerCamelCase : List[str] ): # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories # already exist. os.makedirs(self._cache_dir , exist_ok=_lowerCamelCase ) _snake_case = os.path.join(self._cache_dir , '''fs_test''' + uuid.uuida().hex ) # Opening the file in append mode will create a new file unless it already exists, in which case it will not # change the file contents. open(_lowerCamelCase , '''a''' ) return [probe_file] if self._spark.conf.get('''spark.master''' , '''''' ).startswith('''local''' ): return # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS # accessible to the driver. # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error. 
if self._cache_dir: _snake_case = ( self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(_lowerCamelCase ).collect() ) if os.path.isfile(probe[0] ): return raise ValueError( '''When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir''' ) def lowercase ( self : Dict ): return datasets.DatasetInfo(features=self.config.features ) def lowercase ( self : Union[str, Any] , _lowerCamelCase : datasets.download.download_manager.DownloadManager ): return [datasets.SplitGenerator(name=datasets.Split.TRAIN )] def lowercase ( self : Dict , _lowerCamelCase : List[Any] ): import pyspark def get_arrow_batch_size(_lowerCamelCase : List[Any] ): for batch in it: yield pa.RecordBatch.from_pydict({'''batch_bytes''': [batch.nbytes]} ) _snake_case = self.df.count() _snake_case = df_num_rows if df_num_rows <= 100 else 100 # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample. _snake_case = ( self.df.limit(_lowerCamelCase ) .repartition(1 ) .mapInArrow(_lowerCamelCase , '''batch_bytes: long''' ) .agg(pyspark.sql.functions.sum('''batch_bytes''' ).alias('''sample_bytes''' ) ) .collect()[0] .sample_bytes / sample_num_rows ) _snake_case = approx_bytes_per_row * df_num_rows if approx_total_size > max_shard_size: # Make sure there is at least one row per partition. _snake_case = min(_lowerCamelCase , int(approx_total_size / max_shard_size ) ) _snake_case = self.df.repartition(_lowerCamelCase ) def lowercase ( self : Dict , _lowerCamelCase : str , _lowerCamelCase : str , _lowerCamelCase : int , ): import pyspark _snake_case = ParquetWriter if file_format == '''parquet''' else ArrowWriter _snake_case = os.path.join(self._working_dir , os.path.basename(_lowerCamelCase ) ) if self._working_dir else fpath _snake_case = file_format == '''parquet''' # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to # pickling the SparkContext. 
_snake_case = self.config.features _snake_case = self._writer_batch_size _snake_case = self._fs.storage_options def write_arrow(_lowerCamelCase : Tuple ): # Within the same SparkContext, no two task attempts will share the same attempt ID. _snake_case = pyspark.TaskContext().taskAttemptId() _snake_case = next(_lowerCamelCase , _lowerCamelCase ) if first_batch is None: # Some partitions might not receive any data. return pa.RecordBatch.from_arrays( [[task_id], [0], [0]] , names=['''task_id''', '''num_examples''', '''num_bytes'''] , ) _snake_case = 0 _snake_case = writer_class( features=_lowerCamelCase , path=working_fpath.replace('''SSSSS''' , f'''{shard_id:05d}''' ).replace('''TTTTT''' , f'''{task_id:05d}''' ) , writer_batch_size=_lowerCamelCase , storage_options=_lowerCamelCase , embed_local_files=_lowerCamelCase , ) _snake_case = pa.Table.from_batches([first_batch] ) writer.write_table(_lowerCamelCase ) for batch in it: if max_shard_size is not None and writer._num_bytes >= max_shard_size: _snake_case , _snake_case = writer.finalize() writer.close() yield pa.RecordBatch.from_arrays( [[task_id], [num_examples], [num_bytes]] , names=['''task_id''', '''num_examples''', '''num_bytes'''] , ) shard_id += 1 _snake_case = writer_class( features=writer._features , path=working_fpath.replace('''SSSSS''' , f'''{shard_id:05d}''' ).replace('''TTTTT''' , f'''{task_id:05d}''' ) , writer_batch_size=_lowerCamelCase , storage_options=_lowerCamelCase , embed_local_files=_lowerCamelCase , ) _snake_case = pa.Table.from_batches([batch] ) writer.write_table(_lowerCamelCase ) if writer._num_bytes > 0: _snake_case , _snake_case = writer.finalize() writer.close() yield pa.RecordBatch.from_arrays( [[task_id], [num_examples], [num_bytes]] , names=['''task_id''', '''num_examples''', '''num_bytes'''] , ) if working_fpath != fpath: for file in os.listdir(os.path.dirname(_lowerCamelCase ) ): _snake_case = os.path.join(os.path.dirname(_lowerCamelCase ) , os.path.basename(_lowerCamelCase ) ) 
shutil.move(_lowerCamelCase , _lowerCamelCase ) _snake_case = ( self.df.mapInArrow(_lowerCamelCase , '''task_id: long, num_examples: long, num_bytes: long''' ) .groupBy('''task_id''' ) .agg( pyspark.sql.functions.sum('''num_examples''' ).alias('''total_num_examples''' ) , pyspark.sql.functions.sum('''num_bytes''' ).alias('''total_num_bytes''' ) , pyspark.sql.functions.count('''num_bytes''' ).alias('''num_shards''' ) , pyspark.sql.functions.collect_list('''num_examples''' ).alias('''shard_lengths''' ) , ) .collect() ) for row in stats: yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths) def lowercase ( self : int , _lowerCamelCase : "datasets.SplitGenerator" , _lowerCamelCase : str = "arrow" , _lowerCamelCase : Optional[Union[str, int]] = None , _lowerCamelCase : Optional[int] = None , **_lowerCamelCase : List[Any] , ): self._validate_cache_dir() _snake_case = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE ) self._repartition_df_if_needed(_lowerCamelCase ) _snake_case = not is_remote_filesystem(self._fs ) _snake_case = os.path.join if is_local else posixpath.join _snake_case = '''-TTTTT-SSSSS-of-NNNNN''' _snake_case = f'''{self.name}-{split_generator.name}{SUFFIX}.{file_format}''' _snake_case = path_join(self._output_dir , _lowerCamelCase ) _snake_case = 0 _snake_case = 0 _snake_case = 0 _snake_case = [] _snake_case = [] for task_id, content in self._prepare_split_single(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): ( ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ) = content if num_bytes > 0: total_num_examples += num_examples total_num_bytes += num_bytes total_shards += num_shards task_id_and_num_shards.append((task_id, num_shards) ) all_shard_lengths.extend(_lowerCamelCase ) _snake_case = total_num_examples _snake_case = total_num_bytes # should rename everything at the end logger.debug(f'''Renaming {total_shards} shards.''' ) if total_shards > 1: _snake_case = 
all_shard_lengths # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a # pickling error due to pickling the SparkContext. _snake_case = self._fs # use the -SSSSS-of-NNNNN pattern def _rename_shard( _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : int , ): rename( _lowerCamelCase , fpath.replace('''SSSSS''' , f'''{shard_id:05d}''' ).replace('''TTTTT''' , f'''{task_id:05d}''' ) , fpath.replace('''TTTTT-SSSSS''' , f'''{global_shard_id:05d}''' ).replace('''NNNNN''' , f'''{total_shards:05d}''' ) , ) _snake_case = [] _snake_case = 0 for i in range(len(_lowerCamelCase ) ): _snake_case , _snake_case = task_id_and_num_shards[i] for shard_id in range(_lowerCamelCase ): args.append([task_id, shard_id, global_shard_id] ) global_shard_id += 1 self._spark.sparkContext.parallelize(_lowerCamelCase , len(_lowerCamelCase ) ).map(lambda _lowerCamelCase : _rename_shard(*_lowerCamelCase ) ).collect() else: # don't use any pattern _snake_case = 0 _snake_case = task_id_and_num_shards[0][0] self._rename( fpath.replace('''SSSSS''' , f'''{shard_id:05d}''' ).replace('''TTTTT''' , f'''{task_id:05d}''' ) , fpath.replace(_lowerCamelCase , '''''' ) , ) def lowercase ( self : List[str] , _lowerCamelCase : "datasets.SplitGenerator" , ): return SparkExamplesIterable(self.df )
288
1
"""simple docstring""" def _UpperCAmelCase ( __lowerCamelCase : str , __lowerCamelCase : str ) -> list: _snake_case = len(__lowerCamelCase ) _snake_case = [] for i in range(len(__lowerCamelCase ) - pat_len + 1 ): _snake_case = True for j in range(__lowerCamelCase ): if s[i + j] != pattern[j]: _snake_case = False break if match_found: position.append(__lowerCamelCase ) return position if __name__ == "__main__": assert naive_pattern_search('ABCDEFG', 'DE') == [3] print(naive_pattern_search('ABAAABCDBBABCDDEBCABC', 'ABC'))
288
"""simple docstring""" from math import sqrt def _UpperCAmelCase ( __lowerCamelCase : int = 1_00_00_00 ) -> int: _snake_case = 0 _snake_case = 0 _snake_case = 42 while num_cuboids <= limit: max_cuboid_size += 1 for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ): if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer(): num_cuboids += ( min(__lowerCamelCase , sum_shortest_sides // 2 ) - max(1 , sum_shortest_sides - max_cuboid_size ) + 1 ) return max_cuboid_size if __name__ == "__main__": print(F"{solution() = }")
288
1
"""simple docstring""" import copy from typing import Dict, List, Optional from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING UpperCAmelCase__ = { 'facebook/mask2former-swin-small-coco-instance': ( 'https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json' ) # See all Mask2Former models at https://huggingface.co/models?filter=mask2former } UpperCAmelCase__ = logging.get_logger(__name__) class lowerCAmelCase__ ( A_ ): __a = """mask2former""" __a = ["""swin"""] __a = {"""hidden_size""": """hidden_dim"""} def __init__( self : List[str] , _lowerCamelCase : Optional[Dict] = None , _lowerCamelCase : int = 256 , _lowerCamelCase : int = 256 , _lowerCamelCase : int = 256 , _lowerCamelCase : int = 1024 , _lowerCamelCase : str = "relu" , _lowerCamelCase : int = 6 , _lowerCamelCase : int = 10 , _lowerCamelCase : int = 8 , _lowerCamelCase : float = 0.0 , _lowerCamelCase : int = 2048 , _lowerCamelCase : bool = False , _lowerCamelCase : bool = False , _lowerCamelCase : int = 4 , _lowerCamelCase : int = 255 , _lowerCamelCase : int = 100 , _lowerCamelCase : float = 0.1 , _lowerCamelCase : float = 2.0 , _lowerCamelCase : float = 5.0 , _lowerCamelCase : float = 5.0 , _lowerCamelCase : int = 12544 , _lowerCamelCase : float = 3.0 , _lowerCamelCase : float = 0.7_5 , _lowerCamelCase : float = 0.0_2 , _lowerCamelCase : float = 1.0 , _lowerCamelCase : bool = True , _lowerCamelCase : List[int] = [4, 8, 16, 32] , _lowerCamelCase : bool = None , **_lowerCamelCase : List[str] , ): if backbone_config is None: logger.info('''`backbone_config` is `None`. 
Initializing the config with the default `Swin` backbone.''' ) _snake_case = CONFIG_MAPPING['''swin''']( image_size=224 , in_channels=3 , patch_size=4 , embed_dim=96 , depths=[2, 2, 18, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=_lowerCamelCase , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] , ) if isinstance(_lowerCamelCase , _lowerCamelCase ): _snake_case = backbone_config.pop('''model_type''' ) _snake_case = CONFIG_MAPPING[backbone_model_type] _snake_case = config_class.from_dict(_lowerCamelCase ) # verify that the backbone is supported if backbone_config.model_type not in self.backbones_supported: logger.warning_once( f'''Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. ''' f'''Supported model types: {','.join(self.backbones_supported )}''' ) _snake_case = backbone_config _snake_case = feature_size _snake_case = mask_feature_size _snake_case = hidden_dim _snake_case = encoder_feedforward_dim _snake_case = activation_function _snake_case = encoder_layers _snake_case = decoder_layers _snake_case = num_attention_heads _snake_case = dropout _snake_case = dim_feedforward _snake_case = pre_norm _snake_case = enforce_input_projection _snake_case = common_stride _snake_case = ignore_value _snake_case = num_queries _snake_case = no_object_weight _snake_case = class_weight _snake_case = mask_weight _snake_case = dice_weight _snake_case = train_num_points _snake_case = oversample_ratio _snake_case = importance_sample_ratio _snake_case = init_std _snake_case = init_xavier_std _snake_case = use_auxiliary_loss _snake_case = feature_strides _snake_case = output_auxiliary_logits _snake_case = decoder_layers super().__init__(**_lowerCamelCase ) @classmethod def lowercase ( cls : int , _lowerCamelCase : PretrainedConfig , **_lowerCamelCase : Dict ): return cls( backbone_config=_lowerCamelCase , **_lowerCamelCase , ) def lowercase ( self : 
Optional[Any] ): _snake_case = copy.deepcopy(self.__dict__ ) _snake_case = self.backbone_config.to_dict() _snake_case = self.__class__.model_type return output
288
"""simple docstring""" import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor from transformers.utils import logging logging.set_verbosity_info() UpperCAmelCase__ = logging.get_logger(__name__) def _UpperCAmelCase ( __lowerCamelCase : Any , __lowerCamelCase : Union[str, Any]=False ) -> Optional[int]: _snake_case = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''deit.encoder.layer.{i}.layernorm_before.weight''') ) rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''deit.encoder.layer.{i}.layernorm_before.bias''') ) rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''deit.encoder.layer.{i}.attention.output.dense.weight''') ) rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''deit.encoder.layer.{i}.attention.output.dense.bias''') ) rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''deit.encoder.layer.{i}.layernorm_after.weight''') ) rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''deit.encoder.layer.{i}.layernorm_after.bias''') ) rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''deit.encoder.layer.{i}.intermediate.dense.weight''') ) rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''deit.encoder.layer.{i}.intermediate.dense.bias''') ) rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''deit.encoder.layer.{i}.output.dense.weight''') ) rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''deit.encoder.layer.{i}.output.dense.bias''') ) # projection layer + position embeddings rename_keys.extend( [ ('''cls_token''', '''deit.embeddings.cls_token'''), ('''dist_token''', '''deit.embeddings.distillation_token'''), ('''patch_embed.proj.weight''', 
'''deit.embeddings.patch_embeddings.projection.weight'''), ('''patch_embed.proj.bias''', '''deit.embeddings.patch_embeddings.projection.bias'''), ('''pos_embed''', '''deit.embeddings.position_embeddings'''), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ('''norm.weight''', '''layernorm.weight'''), ('''norm.bias''', '''layernorm.bias'''), ('''pre_logits.fc.weight''', '''pooler.dense.weight'''), ('''pre_logits.fc.bias''', '''pooler.dense.bias'''), ] ) # if just the base model, we should remove "deit" from all keys that start with "deit" _snake_case = [(pair[0], pair[1][4:]) if pair[1].startswith('''deit''' ) else pair for pair in rename_keys] else: # layernorm + classification heads rename_keys.extend( [ ('''norm.weight''', '''deit.layernorm.weight'''), ('''norm.bias''', '''deit.layernorm.bias'''), ('''head.weight''', '''cls_classifier.weight'''), ('''head.bias''', '''cls_classifier.bias'''), ('''head_dist.weight''', '''distillation_classifier.weight'''), ('''head_dist.bias''', '''distillation_classifier.bias'''), ] ) return rename_keys def _UpperCAmelCase ( __lowerCamelCase : int , __lowerCamelCase : List[str] , __lowerCamelCase : Tuple=False ) -> Tuple: for i in range(config.num_hidden_layers ): if base_model: _snake_case = '''''' else: _snake_case = '''deit.''' # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) _snake_case = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''' ) _snake_case = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict _snake_case = in_proj_weight[ : config.hidden_size, : ] _snake_case = in_proj_bias[: config.hidden_size] _snake_case = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] _snake_case = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] _snake_case = in_proj_weight[ -config.hidden_size :, : ] _snake_case = in_proj_bias[-config.hidden_size :] def _UpperCAmelCase ( __lowerCamelCase : str 
, __lowerCamelCase : Tuple , __lowerCamelCase : Tuple ) -> Tuple: _snake_case = dct.pop(__lowerCamelCase ) _snake_case = val def _UpperCAmelCase ( ) -> Dict: _snake_case = '''http://images.cocodataset.org/val2017/000000039769.jpg''' _snake_case = Image.open(requests.get(__lowerCamelCase , stream=__lowerCamelCase ).raw ) return im @torch.no_grad() def _UpperCAmelCase ( __lowerCamelCase : Union[str, Any] , __lowerCamelCase : str ) -> str: _snake_case = DeiTConfig() # all deit models have fine-tuned heads _snake_case = False # dataset (fine-tuned on ImageNet 2012), patch_size and image_size _snake_case = 10_00 _snake_case = '''huggingface/label-files''' _snake_case = '''imagenet-1k-id2label.json''' _snake_case = json.load(open(hf_hub_download(__lowerCamelCase , __lowerCamelCase , repo_type='''dataset''' ) , '''r''' ) ) _snake_case = {int(__lowerCamelCase ): v for k, v in idalabel.items()} _snake_case = idalabel _snake_case = {v: k for k, v in idalabel.items()} _snake_case = int(deit_name[-6:-4] ) _snake_case = int(deit_name[-3:] ) # size of the architecture if deit_name[9:].startswith('''tiny''' ): _snake_case = 1_92 _snake_case = 7_68 _snake_case = 12 _snake_case = 3 elif deit_name[9:].startswith('''small''' ): _snake_case = 3_84 _snake_case = 15_36 _snake_case = 12 _snake_case = 6 if deit_name[9:].startswith('''base''' ): pass elif deit_name[4:].startswith('''large''' ): _snake_case = 10_24 _snake_case = 40_96 _snake_case = 24 _snake_case = 16 # load original model from timm _snake_case = timm.create_model(__lowerCamelCase , pretrained=__lowerCamelCase ) timm_model.eval() # load state_dict of original model, remove and rename some keys _snake_case = timm_model.state_dict() _snake_case = create_rename_keys(__lowerCamelCase , __lowerCamelCase ) for src, dest in rename_keys: rename_key(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) read_in_q_k_v(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) # load HuggingFace model _snake_case = 
DeiTForImageClassificationWithTeacher(__lowerCamelCase ).eval() model.load_state_dict(__lowerCamelCase ) # Check outputs on an image, prepared by DeiTImageProcessor _snake_case = int( (2_56 / 2_24) * config.image_size ) # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103 _snake_case = DeiTImageProcessor(size=__lowerCamelCase , crop_size=config.image_size ) _snake_case = image_processor(images=prepare_img() , return_tensors='''pt''' ) _snake_case = encoding['''pixel_values'''] _snake_case = model(__lowerCamelCase ) _snake_case = timm_model(__lowerCamelCase ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(__lowerCamelCase , outputs.logits , atol=1E-3 ) Path(__lowerCamelCase ).mkdir(exist_ok=__lowerCamelCase ) print(f'''Saving model {deit_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(__lowerCamelCase ) print(f'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(__lowerCamelCase ) if __name__ == "__main__": UpperCAmelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( '--deit_name', default='vit_deit_base_distilled_patch16_224', type=str, help='Name of the DeiT timm model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) UpperCAmelCase__ = parser.parse_args() convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
288
1
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re from ..utils import cached_file # docstyle-ignore UpperCAmelCase__ = '\nHuman: <<task>>\n\nAssistant: ' UpperCAmelCase__ = 'huggingface-tools/default-prompts' UpperCAmelCase__ = {'chat': 'chat_prompt_template.txt', 'run': 'run_prompt_template.txt'} def _UpperCAmelCase ( __lowerCamelCase : Any , __lowerCamelCase : Any , __lowerCamelCase : Dict="run" ) -> Any: if prompt_or_repo_id is None: _snake_case = DEFAULT_PROMPTS_REPO # prompt is considered a repo ID when it does not contain any kind of space if re.search('''\\s''' , __lowerCamelCase ) is not None: return prompt_or_repo_id _snake_case = cached_file( __lowerCamelCase , PROMPT_FILES[mode] , repo_type='''dataset''' , user_agent={'''agent''': agent_name} ) with open(__lowerCamelCase , '''r''' , encoding='''utf-8''' ) as f: return f.read()
288
"""simple docstring""" import json import os from pathlib import Path import pytest from datasets.download.download_config import DownloadConfig from datasets.download.download_manager import DownloadManager from datasets.utils.file_utils import hash_url_to_filename UpperCAmelCase__ = 'http://www.mocksite.com/file1.txt' UpperCAmelCase__ = '"text": ["foo", "foo"]' UpperCAmelCase__ = '6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8' class lowerCAmelCase__ : __a = 200 __a = {"""Content-Length""": """100"""} __a = {} def lowercase ( self : List[str] , **_lowerCamelCase : List[str] ): return [bytes(_lowerCamelCase , '''utf-8''' )] def _UpperCAmelCase ( *__lowerCamelCase : List[str] , **__lowerCamelCase : Dict ) -> Dict: return MockResponse() @pytest.mark.parametrize('''urls_type''' , [str, list, dict] ) def _UpperCAmelCase ( __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[str] , __lowerCamelCase : str ) -> int: import requests monkeypatch.setattr(__lowerCamelCase , '''request''' , __lowerCamelCase ) _snake_case = URL if issubclass(__lowerCamelCase , __lowerCamelCase ): _snake_case = url elif issubclass(__lowerCamelCase , __lowerCamelCase ): _snake_case = [url] elif issubclass(__lowerCamelCase , __lowerCamelCase ): _snake_case = {'''train''': url} _snake_case = '''dummy''' _snake_case = '''downloads''' _snake_case = tmp_path _snake_case = DownloadConfig( cache_dir=os.path.join(__lowerCamelCase , __lowerCamelCase ) , use_etag=__lowerCamelCase , ) _snake_case = DownloadManager(dataset_name=__lowerCamelCase , download_config=__lowerCamelCase ) _snake_case = dl_manager.download(__lowerCamelCase ) _snake_case = urls for downloaded_paths in [downloaded_paths]: if isinstance(__lowerCamelCase , __lowerCamelCase ): _snake_case = [downloaded_paths] _snake_case = [urls] elif isinstance(__lowerCamelCase , __lowerCamelCase ): assert "train" in downloaded_paths.keys() _snake_case = downloaded_paths.values() _snake_case = urls.values() assert 
downloaded_paths for downloaded_path, input_url in zip(__lowerCamelCase , __lowerCamelCase ): assert downloaded_path == dl_manager.downloaded_paths[input_url] _snake_case = Path(__lowerCamelCase ) _snake_case = downloaded_path.parts assert parts[-1] == HASH assert parts[-2] == cache_subdir assert downloaded_path.exists() _snake_case = downloaded_path.read_text() assert content == CONTENT _snake_case = downloaded_path.with_suffix('''.json''' ) assert metadata_downloaded_path.exists() _snake_case = json.loads(metadata_downloaded_path.read_text() ) assert metadata_content == {"url": URL, "etag": None} @pytest.mark.parametrize('''paths_type''' , [str, list, dict] ) def _UpperCAmelCase ( __lowerCamelCase : int , __lowerCamelCase : str , __lowerCamelCase : Optional[int] ) -> int: _snake_case = str(__lowerCamelCase ) if issubclass(__lowerCamelCase , __lowerCamelCase ): _snake_case = filename elif issubclass(__lowerCamelCase , __lowerCamelCase ): _snake_case = [filename] elif issubclass(__lowerCamelCase , __lowerCamelCase ): _snake_case = {'''train''': filename} _snake_case = '''dummy''' _snake_case = xz_file.parent _snake_case = '''extracted''' _snake_case = DownloadConfig( cache_dir=__lowerCamelCase , use_etag=__lowerCamelCase , ) _snake_case = DownloadManager(dataset_name=__lowerCamelCase , download_config=__lowerCamelCase ) _snake_case = dl_manager.extract(__lowerCamelCase ) _snake_case = paths for extracted_paths in [extracted_paths]: if isinstance(__lowerCamelCase , __lowerCamelCase ): _snake_case = [extracted_paths] _snake_case = [paths] elif isinstance(__lowerCamelCase , __lowerCamelCase ): assert "train" in extracted_paths.keys() _snake_case = extracted_paths.values() _snake_case = paths.values() assert extracted_paths for extracted_path, input_path in zip(__lowerCamelCase , __lowerCamelCase ): assert extracted_path == dl_manager.extracted_paths[input_path] _snake_case = Path(__lowerCamelCase ) _snake_case = extracted_path.parts assert parts[-1] == 
hash_url_to_filename(__lowerCamelCase , etag=__lowerCamelCase ) assert parts[-2] == extracted_subdir assert extracted_path.exists() _snake_case = extracted_path.read_text() _snake_case = text_file.read_text() assert extracted_file_content == expected_file_content def _UpperCAmelCase ( __lowerCamelCase : Tuple , __lowerCamelCase : List[Any] ) -> Dict: assert path.endswith('''.jsonl''' ) for num_items, line in enumerate(__lowerCamelCase , start=1 ): _snake_case = json.loads(line.decode('''utf-8''' ) ) assert item.keys() == {"col_1", "col_2", "col_3"} assert num_items == 4 @pytest.mark.parametrize('''archive_jsonl''' , ['''tar_jsonl_path''', '''zip_jsonl_path'''] ) def _UpperCAmelCase ( __lowerCamelCase : Dict , __lowerCamelCase : str ) -> Dict: _snake_case = request.getfixturevalue(__lowerCamelCase ) _snake_case = DownloadManager() for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(__lowerCamelCase ) , start=1 ): _test_jsonl(__lowerCamelCase , __lowerCamelCase ) assert num_jsonl == 2 @pytest.mark.parametrize('''archive_nested_jsonl''' , ['''tar_nested_jsonl_path''', '''zip_nested_jsonl_path'''] ) def _UpperCAmelCase ( __lowerCamelCase : str , __lowerCamelCase : List[Any] ) -> Tuple: _snake_case = request.getfixturevalue(__lowerCamelCase ) _snake_case = DownloadManager() for num_tar, (path, file) in enumerate(dl_manager.iter_archive(__lowerCamelCase ) , start=1 ): for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(__lowerCamelCase ) , start=1 ): _test_jsonl(__lowerCamelCase , __lowerCamelCase ) assert num_tar == 1 assert num_jsonl == 2 def _UpperCAmelCase ( __lowerCamelCase : Tuple ) -> List[Any]: _snake_case = DownloadManager() for num_file, file in enumerate(dl_manager.iter_files(__lowerCamelCase ) , start=1 ): assert os.path.basename(__lowerCamelCase ) == ("test.txt" if num_file == 1 else "train.txt") assert num_file == 2
288
1
"""simple docstring""" from __future__ import annotations def _UpperCAmelCase ( __lowerCamelCase : list[int] , __lowerCamelCase : int ) -> list[int]: _snake_case = 0 _snake_case = len(__lowerCamelCase ) - 1 while i < j: if nums[i] + nums[j] == target: return [i, j] elif nums[i] + nums[j] < target: _snake_case = i + 1 else: _snake_case = j - 1 return [] if __name__ == "__main__": import doctest doctest.testmod() print(F"{two_pointer([2, 7, 11, 15], 9) = }")
288
"""simple docstring""" import argparse import torch from transformers import BertForMaskedLM if __name__ == "__main__": UpperCAmelCase__ = argparse.ArgumentParser( description=( 'Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned' ' Distillation' ) ) parser.add_argument('--model_type', default='bert', choices=['bert']) parser.add_argument('--model_name', default='bert-base-uncased', type=str) parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_bert-base-uncased_0247911.pth', type=str) parser.add_argument('--vocab_transform', action='store_true') UpperCAmelCase__ = parser.parse_args() if args.model_type == "bert": UpperCAmelCase__ = BertForMaskedLM.from_pretrained(args.model_name) UpperCAmelCase__ = 'bert' else: raise ValueError('args.model_type should be "bert".') UpperCAmelCase__ = model.state_dict() UpperCAmelCase__ = {} for w in ["word_embeddings", "position_embeddings"]: UpperCAmelCase__ = state_dict[F"{prefix}.embeddings.{w}.weight"] for w in ["weight", "bias"]: UpperCAmelCase__ = state_dict[F"{prefix}.embeddings.LayerNorm.{w}"] UpperCAmelCase__ = 0 for teacher_idx in [0, 2, 4, 7, 9, 11]: for w in ["weight", "bias"]: UpperCAmelCase__ = state_dict[ F"{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}" ] UpperCAmelCase__ = state_dict[ F"{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}" ] UpperCAmelCase__ = state_dict[ F"{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}" ] UpperCAmelCase__ = state_dict[ F"{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}" ] UpperCAmelCase__ = state_dict[ F"{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}" ] UpperCAmelCase__ = state_dict[ F"{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}" ] UpperCAmelCase__ = state_dict[ F"{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}" ] UpperCAmelCase__ = state_dict[ F"{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}" ] std_idx += 1 
UpperCAmelCase__ = state_dict['cls.predictions.decoder.weight'] UpperCAmelCase__ = state_dict['cls.predictions.bias'] if args.vocab_transform: for w in ["weight", "bias"]: UpperCAmelCase__ = state_dict[F"cls.predictions.transform.dense.{w}"] UpperCAmelCase__ = state_dict[F"cls.predictions.transform.LayerNorm.{w}"] print(F"N layers selected for distillation: {std_idx}") print(F"Number of params transferred for distillation: {len(compressed_sd.keys())}") print(F"Save transferred checkpoint to {args.dump_checkpoint}.") torch.save(compressed_sd, args.dump_checkpoint)
288
1
"""simple docstring""" from math import pow def _UpperCAmelCase ( __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : int , ) -> tuple[int, int]: if current_sum == needed_sum: # If the sum of the powers is equal to needed_sum, then we have a solution. solutions_count += 1 return current_sum, solutions_count _snake_case = int(pow(__lowerCamelCase , __lowerCamelCase ) ) if current_sum + i_to_n <= needed_sum: # If the sum of the powers is less than needed_sum, then continue adding powers. current_sum += i_to_n _snake_case , _snake_case = backtrack( __lowerCamelCase , __lowerCamelCase , current_number + 1 , __lowerCamelCase , __lowerCamelCase ) current_sum -= i_to_n if i_to_n < needed_sum: # If the power of i is less than needed_sum, then try with the next power. _snake_case , _snake_case = backtrack( __lowerCamelCase , __lowerCamelCase , current_number + 1 , __lowerCamelCase , __lowerCamelCase ) return current_sum, solutions_count def _UpperCAmelCase ( __lowerCamelCase : int , __lowerCamelCase : int ) -> int: if not (1 <= needed_sum <= 10_00 and 2 <= power <= 10): raise ValueError( '''Invalid input\n''' '''needed_sum must be between 1 and 1000, power between 2 and 10.''' ) return backtrack(__lowerCamelCase , __lowerCamelCase , 1 , 0 , 0 )[1] # Return the solutions_count if __name__ == "__main__": import doctest doctest.testmod()
288
"""simple docstring""" def _UpperCAmelCase ( __lowerCamelCase : list , __lowerCamelCase : int = 0 ) -> list: _snake_case = length or len(__lowerCamelCase ) _snake_case = False for i in range(length - 1 ): if list_data[i] > list_data[i + 1]: _snake_case , _snake_case = list_data[i + 1], list_data[i] _snake_case = True return list_data if not swapped else bubble_sort(__lowerCamelCase , length - 1 ) if __name__ == "__main__": import doctest doctest.testmod()
288
1
"""simple docstring""" import argparse import os import jax as jnp import numpy as onp import torch import torch.nn as nn from music_spectrogram_diffusion import inference from tax import checkpoints from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, TaFilmDecoder UpperCAmelCase__ = 'base_with_context' def _UpperCAmelCase ( __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[Any] ) -> int: _snake_case = nn.Parameter(torch.FloatTensor(weights['''token_embedder''']['''embedding'''] ) ) _snake_case = nn.Parameter( torch.FloatTensor(weights['''Embed_0''']['''embedding'''] ) , requires_grad=__lowerCamelCase ) for lyr_num, lyr in enumerate(model.encoders ): _snake_case = weights[f'''layers_{lyr_num}'''] _snake_case = nn.Parameter( torch.FloatTensor(ly_weight['''pre_attention_layer_norm''']['''scale'''] ) ) _snake_case = ly_weight['''attention'''] _snake_case = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) ) _snake_case = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) ) _snake_case = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) ) _snake_case = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) ) _snake_case = nn.Parameter(torch.FloatTensor(ly_weight['''pre_mlp_layer_norm''']['''scale'''] ) ) _snake_case = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_0''']['''kernel'''].T ) ) _snake_case = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_1''']['''kernel'''].T ) ) _snake_case = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wo''']['''kernel'''].T ) ) _snake_case = nn.Parameter(torch.FloatTensor(weights['''encoder_norm''']['''scale'''] ) ) return model def _UpperCAmelCase ( __lowerCamelCase : Any , __lowerCamelCase : Any ) -> Union[str, Any]: _snake_case = 
nn.Parameter(torch.FloatTensor(weights['''input_proj''']['''kernel'''].T ) ) _snake_case = nn.Parameter( torch.FloatTensor(weights['''Embed_0''']['''embedding'''] ) , requires_grad=__lowerCamelCase ) for lyr_num, lyr in enumerate(model.encoders ): _snake_case = weights[f'''layers_{lyr_num}'''] _snake_case = ly_weight['''attention'''] _snake_case = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) ) _snake_case = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) ) _snake_case = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) ) _snake_case = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) ) _snake_case = nn.Parameter( torch.FloatTensor(ly_weight['''pre_attention_layer_norm''']['''scale'''] ) ) _snake_case = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_0''']['''kernel'''].T ) ) _snake_case = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_1''']['''kernel'''].T ) ) _snake_case = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wo''']['''kernel'''].T ) ) _snake_case = nn.Parameter(torch.FloatTensor(ly_weight['''pre_mlp_layer_norm''']['''scale'''] ) ) _snake_case = nn.Parameter(torch.FloatTensor(weights['''encoder_norm''']['''scale'''] ) ) return model def _UpperCAmelCase ( __lowerCamelCase : Tuple , __lowerCamelCase : Optional[Any] ) -> str: _snake_case = nn.Parameter(torch.FloatTensor(weights['''time_emb_dense0''']['''kernel'''].T ) ) _snake_case = nn.Parameter(torch.FloatTensor(weights['''time_emb_dense1''']['''kernel'''].T ) ) _snake_case = nn.Parameter( torch.FloatTensor(weights['''Embed_0''']['''embedding'''] ) , requires_grad=__lowerCamelCase ) _snake_case = nn.Parameter( torch.FloatTensor(weights['''continuous_inputs_projection''']['''kernel'''].T ) ) for lyr_num, lyr in enumerate(model.decoders ): _snake_case = weights[f'''layers_{lyr_num}'''] _snake_case = nn.Parameter( 
torch.FloatTensor(ly_weight['''pre_self_attention_layer_norm''']['''scale'''] ) ) _snake_case = nn.Parameter( torch.FloatTensor(ly_weight['''FiLMLayer_0''']['''DenseGeneral_0''']['''kernel'''].T ) ) _snake_case = ly_weight['''self_attention'''] _snake_case = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) ) _snake_case = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) ) _snake_case = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) ) _snake_case = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) ) _snake_case = ly_weight['''MultiHeadDotProductAttention_0'''] _snake_case = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) ) _snake_case = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) ) _snake_case = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) ) _snake_case = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) ) _snake_case = nn.Parameter( torch.FloatTensor(ly_weight['''pre_cross_attention_layer_norm''']['''scale'''] ) ) _snake_case = nn.Parameter(torch.FloatTensor(ly_weight['''pre_mlp_layer_norm''']['''scale'''] ) ) _snake_case = nn.Parameter( torch.FloatTensor(ly_weight['''FiLMLayer_1''']['''DenseGeneral_0''']['''kernel'''].T ) ) _snake_case = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_0''']['''kernel'''].T ) ) _snake_case = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_1''']['''kernel'''].T ) ) _snake_case = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wo''']['''kernel'''].T ) ) _snake_case = nn.Parameter(torch.FloatTensor(weights['''decoder_norm''']['''scale'''] ) ) _snake_case = nn.Parameter(torch.FloatTensor(weights['''spec_out_dense''']['''kernel'''].T ) ) return model def _UpperCAmelCase ( __lowerCamelCase : Dict ) -> Optional[int]: _snake_case = 
checkpoints.load_tax_checkpoint(args.checkpoint_path ) _snake_case = jnp.tree_util.tree_map(onp.array , __lowerCamelCase ) _snake_case = [ '''from __gin__ import dynamic_registration''', '''from music_spectrogram_diffusion.models.diffusion import diffusion_utils''', '''diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0''', '''diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()''', ] _snake_case = os.path.join(args.checkpoint_path , '''..''' , '''config.gin''' ) _snake_case = inference.parse_training_gin_file(__lowerCamelCase , __lowerCamelCase ) _snake_case = inference.InferenceModel(args.checkpoint_path , __lowerCamelCase ) _snake_case = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' , variance_type='''fixed_large''' ) _snake_case = SpectrogramNotesEncoder( max_length=synth_model.sequence_length['''inputs'''] , vocab_size=synth_model.model.module.config.vocab_size , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='''gated-gelu''' , ) _snake_case = SpectrogramContEncoder( input_dims=synth_model.audio_codec.n_dims , targets_context_length=synth_model.sequence_length['''targets_context'''] , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='''gated-gelu''' , ) _snake_case = TaFilmDecoder( input_dims=synth_model.audio_codec.n_dims , targets_length=synth_model.sequence_length['''targets_context'''] , 
max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time , d_model=synth_model.model.module.config.emb_dim , num_layers=synth_model.model.module.config.num_decoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , dropout_rate=synth_model.model.module.config.dropout_rate , ) _snake_case = load_notes_encoder(ta_checkpoint['''target''']['''token_encoder'''] , __lowerCamelCase ) _snake_case = load_continuous_encoder(ta_checkpoint['''target''']['''continuous_encoder'''] , __lowerCamelCase ) _snake_case = load_decoder(ta_checkpoint['''target''']['''decoder'''] , __lowerCamelCase ) _snake_case = OnnxRuntimeModel.from_pretrained('''kashif/soundstream_mel_decoder''' ) _snake_case = SpectrogramDiffusionPipeline( notes_encoder=__lowerCamelCase , continuous_encoder=__lowerCamelCase , decoder=__lowerCamelCase , scheduler=__lowerCamelCase , melgan=__lowerCamelCase , ) if args.save: pipe.save_pretrained(args.output_path ) if __name__ == "__main__": UpperCAmelCase__ = argparse.ArgumentParser() parser.add_argument('--output_path', default=None, type=str, required=True, help='Path to the converted model.') parser.add_argument( '--save', default=True, type=bool, required=False, help='Whether to save the converted model or not.' ) parser.add_argument( '--checkpoint_path', default=F"{MODEL}/checkpoint_500000", type=str, required=False, help='Path to the original jax model checkpoint.', ) UpperCAmelCase__ = parser.parse_args() main(args)
288
"""simple docstring""" import argparse import torch from transformers import ( SpeechTaConfig, SpeechTaFeatureExtractor, SpeechTaForSpeechToSpeech, SpeechTaForSpeechToText, SpeechTaForTextToSpeech, SpeechTaProcessor, SpeechTaTokenizer, logging, ) from transformers.tokenization_utils import AddedToken logging.set_verbosity_info() UpperCAmelCase__ = logging.get_logger('transformers.models.speecht5') UpperCAmelCase__ = { 'speech_encoder_prenet.layer_norm': 'speecht5.encoder.prenet.feature_projection.layer_norm', 'speech_encoder_prenet.post_extract_proj': 'speecht5.encoder.prenet.feature_projection.projection', 'speech_encoder_prenet.pos_conv.0': 'speecht5.encoder.prenet.pos_conv_embed.conv', 'speech_encoder_prenet.mask_emb': 'speecht5.encoder.prenet.masked_spec_embed', } UpperCAmelCase__ = { 'text_encoder_prenet.encoder_prenet.0': 'speecht5.encoder.prenet.embed_tokens', 'text_encoder_prenet.encoder_prenet.1.alpha': 'speecht5.encoder.prenet.encode_positions.alpha', } UpperCAmelCase__ = { 'speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0': 'speecht5.decoder.prenet.layers.0', 'speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0': 'speecht5.decoder.prenet.layers.1', 'speech_decoder_prenet.decoder_prenet.0.1': 'speecht5.decoder.prenet.final_layer', 'speech_decoder_prenet.decoder_prenet.1.alpha': 'speecht5.decoder.prenet.encode_positions.alpha', 'speech_decoder_prenet.spkembs_layer.0': 'speecht5.decoder.prenet.speaker_embeds_layer', } UpperCAmelCase__ = { 'speech_decoder_postnet.feat_out': 'speech_decoder_postnet.feat_out', 'speech_decoder_postnet.prob_out': 'speech_decoder_postnet.prob_out', 'speech_decoder_postnet.postnet.postnet.0.0': 'speech_decoder_postnet.layers.0.conv', 'speech_decoder_postnet.postnet.postnet.0.1': 'speech_decoder_postnet.layers.0.batch_norm', 'speech_decoder_postnet.postnet.postnet.1.0': 'speech_decoder_postnet.layers.1.conv', 'speech_decoder_postnet.postnet.postnet.1.1': 'speech_decoder_postnet.layers.1.batch_norm', 
'speech_decoder_postnet.postnet.postnet.2.0': 'speech_decoder_postnet.layers.2.conv', 'speech_decoder_postnet.postnet.postnet.2.1': 'speech_decoder_postnet.layers.2.batch_norm', 'speech_decoder_postnet.postnet.postnet.3.0': 'speech_decoder_postnet.layers.3.conv', 'speech_decoder_postnet.postnet.postnet.3.1': 'speech_decoder_postnet.layers.3.batch_norm', 'speech_decoder_postnet.postnet.postnet.4.0': 'speech_decoder_postnet.layers.4.conv', 'speech_decoder_postnet.postnet.postnet.4.1': 'speech_decoder_postnet.layers.4.batch_norm', } UpperCAmelCase__ = { 'text_decoder_prenet.embed_tokens': 'speecht5.decoder.prenet.embed_tokens', } UpperCAmelCase__ = { 'text_decoder_postnet.output_projection': 'text_decoder_postnet.lm_head', } UpperCAmelCase__ = { 'encoder.layers.*.self_attn.k_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj', 'encoder.layers.*.self_attn.v_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj', 'encoder.layers.*.self_attn.q_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj', 'encoder.layers.*.self_attn.out_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj', 'encoder.layers.*.self_attn_layer_norm': 'speecht5.encoder.wrapped_encoder.layers.*.layer_norm', 'encoder.layers.*.fc1': 'speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense', 'encoder.layers.*.fc2': 'speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense', 'encoder.layers.*.final_layer_norm': 'speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm', 'encoder.layer_norm': 'speecht5.encoder.wrapped_encoder.layer_norm', 'encoder.pos_emb.pe_k': 'speecht5.encoder.wrapped_encoder.embed_positions.pe_k', } UpperCAmelCase__ = { 'decoder.layers.*.self_attn.k_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj', 'decoder.layers.*.self_attn.v_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj', 'decoder.layers.*.self_attn.q_proj': 
'speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj', 'decoder.layers.*.self_attn.out_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj', 'decoder.layers.*.self_attn_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm', 'decoder.layers.*.encoder_attn.k_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj', 'decoder.layers.*.encoder_attn.v_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj', 'decoder.layers.*.encoder_attn.q_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj', 'decoder.layers.*.encoder_attn.out_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj', 'decoder.layers.*.encoder_attn_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm', 'decoder.layers.*.fc1': 'speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense', 'decoder.layers.*.fc2': 'speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense', 'decoder.layers.*.final_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm', } UpperCAmelCase__ = { **MAPPING_SPEECH_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_TEXT_DECODER_PRENET, **MAPPING_TEXT_DECODER_POSTNET, } UpperCAmelCase__ = { **MAPPING_TEXT_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_SPEECH_DECODER_PRENET, **MAPPING_SPEECH_DECODER_POSTNET, } UpperCAmelCase__ = { **MAPPING_SPEECH_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_SPEECH_DECODER_PRENET, **MAPPING_SPEECH_DECODER_POSTNET, } UpperCAmelCase__ = [] UpperCAmelCase__ = [ 'encoder.version', 'encoder.layers.*.norm_k.weight', 'encoder.layers.*.norm_k.bias', 'decoder.version', 'decoder.layers.*.norm_k.weight', 'decoder.layers.*.norm_k.bias', 'decoder.pos_emb.pe_k', 'speech_encoder_prenet.embed_positions._float_tensor', 'text_decoder_prenet.embed_positions._float_tensor', ] UpperCAmelCase__ = IGNORE_KEYS + [ 'encoder.proj', 
'text_encoder_prenet.*', 'speech_decoder_prenet.*', 'speech_decoder_postnet.*', ] UpperCAmelCase__ = IGNORE_KEYS + [ 'encoder.proj', 'speech_encoder_prenet.*', 'text_decoder_prenet.*', 'text_decoder_postnet.*', ] UpperCAmelCase__ = IGNORE_KEYS + [ 'encoder.proj', 'text_encoder_prenet.*', 'text_decoder_prenet.*', 'text_decoder_postnet.*', ] def _UpperCAmelCase ( __lowerCamelCase : Dict , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : int , __lowerCamelCase : Dict ) -> List[Any]: for attribute in key.split('''.''' ): _snake_case = getattr(__lowerCamelCase , __lowerCamelCase ) if weight_type is not None: _snake_case = getattr(__lowerCamelCase , __lowerCamelCase ).shape else: _snake_case = hf_pointer.shape if hf_shape != value.shape: raise ValueError( f'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be''' f''' {value.shape} for {full_name}''' ) if weight_type == "weight": _snake_case = value elif weight_type == "weight_g": _snake_case = value elif weight_type == "weight_v": _snake_case = value elif weight_type == "bias": _snake_case = value elif weight_type == "running_mean": _snake_case = value elif weight_type == "running_var": _snake_case = value elif weight_type == "num_batches_tracked": _snake_case = value else: _snake_case = value logger.info(f'''{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.''' ) def _UpperCAmelCase ( __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[int] ) -> List[str]: for key in ignore_keys: if key.endswith('''.*''' ): if name.startswith(key[:-1] ): return True elif ".*." 
in key: _snake_case , _snake_case = key.split('''.*.''' ) if prefix in name and suffix in name: return True elif key in name: return True return False def _UpperCAmelCase ( __lowerCamelCase : Dict , __lowerCamelCase : List[Any] , __lowerCamelCase : Tuple ) -> Optional[Any]: _snake_case = [] if task == "s2t": _snake_case = hf_model.speechta.encoder.prenet.feature_encoder _snake_case = MAPPING_S2T _snake_case = IGNORE_KEYS_S2T elif task == "t2s": _snake_case = None _snake_case = MAPPING_T2S _snake_case = IGNORE_KEYS_T2S elif task == "s2s": _snake_case = hf_model.speechta.encoder.prenet.feature_encoder _snake_case = MAPPING_S2S _snake_case = IGNORE_KEYS_S2S else: raise ValueError(f'''Unsupported task: {task}''' ) for name, value in fairseq_dict.items(): if should_ignore(__lowerCamelCase , __lowerCamelCase ): logger.info(f'''{name} was ignored''' ) continue _snake_case = False if "conv_layers" in name: load_conv_layer( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , hf_model.config.feat_extract_norm == '''group''' , ) _snake_case = True else: for key, mapped_key in MAPPING.items(): # mapped_key = "speecht5." 
+ mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if "*" in key: _snake_case , _snake_case = key.split('''.*.''' ) if prefix in name and suffix in name: _snake_case = suffix # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]: if key in name: _snake_case = True if "*" in mapped_key: _snake_case = name.split(__lowerCamelCase )[0].split('''.''' )[-2] _snake_case = mapped_key.replace('''*''' , __lowerCamelCase ) if "weight_g" in name: _snake_case = '''weight_g''' elif "weight_v" in name: _snake_case = '''weight_v''' elif "bias" in name: _snake_case = '''bias''' elif "weight" in name: _snake_case = '''weight''' elif "running_mean" in name: _snake_case = '''running_mean''' elif "running_var" in name: _snake_case = '''running_var''' elif "num_batches_tracked" in name: _snake_case = '''num_batches_tracked''' else: _snake_case = None set_recursively(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) continue if not is_used: unused_weights.append(__lowerCamelCase ) logger.warning(f'''Unused weights: {unused_weights}''' ) def _UpperCAmelCase ( __lowerCamelCase : Any , __lowerCamelCase : Optional[int] , __lowerCamelCase : Tuple , __lowerCamelCase : List[Any] , __lowerCamelCase : Tuple ) -> List[Any]: _snake_case = full_name.split('''conv_layers.''' )[-1] _snake_case = name.split('''.''' ) _snake_case = int(items[0] ) _snake_case = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) _snake_case = value logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' 
{feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) _snake_case = value logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' ) _snake_case = value logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' ) _snake_case = value logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(__lowerCamelCase ) @torch.no_grad() def _UpperCAmelCase ( __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Dict , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : int=None , __lowerCamelCase : Union[str, Any]=None , ) -> Dict: if config_path is not None: _snake_case = SpeechTaConfig.from_pretrained(__lowerCamelCase ) else: _snake_case = SpeechTaConfig() if task == "s2t": _snake_case = config.max_text_positions _snake_case = SpeechTaForSpeechToText(__lowerCamelCase ) elif task == "t2s": _snake_case = 18_76 _snake_case = 6_00 _snake_case = config.max_speech_positions _snake_case = SpeechTaForTextToSpeech(__lowerCamelCase ) elif task == "s2s": _snake_case = 18_76 _snake_case = config.max_speech_positions _snake_case = SpeechTaForSpeechToSpeech(__lowerCamelCase ) else: raise ValueError(f'''Unknown task name: {task}''' ) if vocab_path: 
_snake_case = SpeechTaTokenizer(__lowerCamelCase , model_max_length=config.max_text_positions ) # Mask token behaves like a normal word, i.e. include the space before it _snake_case = AddedToken('''<mask>''' , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) _snake_case = mask_token tokenizer.add_special_tokens({'''mask_token''': mask_token} ) tokenizer.add_tokens(['''<ctc_blank>'''] ) _snake_case = SpeechTaFeatureExtractor() _snake_case = SpeechTaProcessor(tokenizer=__lowerCamelCase , feature_extractor=__lowerCamelCase ) processor.save_pretrained(__lowerCamelCase ) _snake_case = torch.load(__lowerCamelCase ) recursively_load_weights(fairseq_checkpoint['''model'''] , __lowerCamelCase , __lowerCamelCase ) model.save_pretrained(__lowerCamelCase ) if repo_id: print('''Pushing to the hub...''' ) processor.push_to_hub(__lowerCamelCase ) model.push_to_hub(__lowerCamelCase ) if __name__ == "__main__": UpperCAmelCase__ = argparse.ArgumentParser() parser.add_argument( '--task', default='s2t', type=str, help='Type of the SpeechT5 model you\'d like to convert. Should be one of \'s2t\', \'t2s\', \'s2s\'.', ) parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to fairseq checkpoint') parser.add_argument('--vocab_path', default=None, type=str, help='Path to SentencePiece model') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') parser.add_argument( '--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.' ) parser.add_argument( '--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.' ) UpperCAmelCase__ = parser.parse_args() convert_speechta_checkpoint( args.task, args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.vocab_path, args.push_to_hub, )
288
1
"""Compute binomial coefficients C(n, k) with a few worked examples."""
from math import factorial


def combinations(n: int, k: int) -> int:
    """Return the number of ways to choose `k` items from `n` (n choose k).

    Args:
        n: total number of items; must satisfy n >= k >= 0.
        k: size of each selection.

    Returns:
        The binomial coefficient n! / (k! * (n - k)!), as an exact int.

    Raises:
        ValueError: if k > n or k < 0 (note: n < 0 implies k < 0 or k > n,
            so negative n is rejected by the same check).
    """
    if n < k or k < 0:
        raise ValueError("Please enter positive integers for n and k where n >= k")
    # Integer division is exact here because k!*(n-k)! always divides n!.
    return factorial(n) // (factorial(k) * factorial(n - k))


# Backward-compatible alias for the previous (obfuscated) function name.
_UpperCAmelCase = combinations

if __name__ == "__main__":
    print(
        "The number of five-card hands possible from a standard",
        f"fifty-two card deck is: {combinations(52, 5)}\n",
    )
    print(
        "If a class of 40 students must be arranged into groups of",
        f"4 for group projects, there are {combinations(40, 4)} ways",
        "to arrange them.\n",
    )
    print(
        "If 10 teams are competing in a Formula One race, there",
        f"are {combinations(10, 3)} ways that first, second and",
        "third place can be awarded.",
    )
288
"""Convert an original T5X Pix2Struct checkpoint into the HF Transformers format."""
import argparse
import os
import re

import torch
from flax.traverse_util import flatten_dict
from t5x import checkpoints  # fixed: the obfuscated copy imported nonexistent `tax`

from transformers import (
    AutoTokenizer,
    Pix2StructConfig,
    Pix2StructForConditionalGeneration,
    Pix2StructImageProcessor,
    Pix2StructProcessor,
    Pix2StructTextConfig,
    Pix2StructVisionConfig,
)


def get_flax_param(t5x_checkpoint_path):
    """Load a T5X checkpoint and return its parameters as a flat dict.

    Keys of the returned dict are tuples of path components (flax convention).
    """
    flax_params = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    return flatten_dict(flax_params)


def rename_and_convert_flax_params(flax_dict):
    """Rename flax parameter keys to HF names and convert values to torch tensors.

    Only entries under the "target" namespace are kept. Linear kernels are
    transposed (flax stores them as (in, out); torch expects (out, in)) except
    for embedding tables, which keep their layout.
    """
    converted_dict = {}

    # Generic flax -> HF key fragments.
    CONVERSION_MAPPING = {
        "token_embedder": "embeddings",
        "encoder_norm": "layernorm",
        "kernel": "weight",
        ".out": ".output",
        "scale": "weight",
        "embedders_0.pos_embedding": "row_embedder.weight",
        "embedders_1.pos_embedding": "column_embedder.weight",
    }

    # Extra renames that only apply on the decoder side.
    DECODER_CONVERSION_MAPPING = {
        "query": "attention.query",
        "key": "attention.key",
        "value": "attention.value",
        "output.dense": "output",
        "encoder_decoder_attention.o": "encoder_decoder_attention.attention.o",
        "pre_self_attention_layer_norm": "self_attention.layer_norm",
        "pre_cross_attention_layer_norm": "encoder_decoder_attention.layer_norm",
        "mlp.": "mlp.DenseReluDense.",
        "pre_mlp_layer_norm": "mlp.layer_norm",
        "self_attention.o": "self_attention.attention.o",
        "decoder.embeddings.embedding": "decoder.embed_tokens.weight",
        "decoder.relpos_bias.rel_embedding": "decoder.layer.0.self_attention.attention.relative_attention_bias.weight",
        "decoder.decoder_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.logits_dense.weight": "decoder.lm_head.weight",
    }

    for key in flax_dict.keys():
        if "target" in key:
            # Drop the leading "target" component and join the rest.
            new_key = ".".join(key[1:])

            for old, new in CONVERSION_MAPPING.items():
                new_key = new_key.replace(old, new)

            if "decoder" in new_key:
                for old, new in DECODER_CONVERSION_MAPPING.items():
                    new_key = new_key.replace(old, new)

            if "layers" in new_key and "decoder" not in new_key:
                # layers_<i> -> layer.<i>, and encoder gains an extra nesting level.
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)
                new_key = new_key.replace("encoder", "encoder.encoder")
            elif "layers" in new_key and "decoder" in new_key:
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)

            converted_dict[new_key] = flax_dict[key]

    converted_torch_dict = {}
    # Convert to torch, transposing everything except embedding tables.
    for key in converted_dict.keys():
        if ("embed_tokens" not in key) and ("embedder" not in key):
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key].T)
        else:
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key])

    return converted_torch_dict


def convert_pix2struct_original_pytorch_checkpoint_to_hf(
    t5x_checkpoint_path, pytorch_dump_folder_path, use_large=False, is_vqa=False
):
    """Build a Pix2Struct HF model from a T5X checkpoint and save it with its processor.

    Args:
        t5x_checkpoint_path: path to the original T5X checkpoint.
        pytorch_dump_folder_path: output directory (created if missing).
        use_large: use the "large" architecture hyper-parameters.
        is_vqa: mark the resulting config as a VQA model.
    """
    flax_params = get_flax_param(t5x_checkpoint_path)

    if not use_large:
        encoder_config = Pix2StructVisionConfig()
        decoder_config = Pix2StructTextConfig()
    else:
        encoder_config = Pix2StructVisionConfig(
            hidden_size=1536, d_ff=3968, num_attention_heads=24, num_hidden_layers=18
        )
        decoder_config = Pix2StructTextConfig(hidden_size=1536, d_ff=3968, num_heads=24, num_layers=18)

    config = Pix2StructConfig(
        vision_config=encoder_config.to_dict(), text_config=decoder_config.to_dict(), is_vqa=is_vqa
    )

    model = Pix2StructForConditionalGeneration(config)

    torch_params = rename_and_convert_flax_params(flax_params)
    model.load_state_dict(torch_params)

    tok = AutoTokenizer.from_pretrained("ybelkada/test-pix2struct-tokenizer")
    image_processor = Pix2StructImageProcessor()
    processor = Pix2StructProcessor(image_processor=image_processor, tokenizer=tok)

    if use_large:
        processor.image_processor.max_patches = 4096

    # NOTE(review): in the flattened original the placement of this line is
    # ambiguous; upstream sets it unconditionally — confirm against the repo.
    processor.image_processor.is_vqa = True

    # mkdir if needed
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)

    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)

    print("Model saved in {}".format(pytorch_dump_folder_path))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--t5x_checkpoint_path", default=None, type=str, help="Path to the original T5x checkpoint.")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--use_large", action="store_true", help="Use large model.")
    parser.add_argument("--is_vqa", action="store_true", help="Use large model.")
    args = parser.parse_args()
    # NOTE(review): `is_vqa` is parsed but not forwarded, matching the original
    # call signature; forward args.is_vqa here if VQA conversion is needed.
    convert_pix2struct_original_pytorch_checkpoint_to_hf(
        args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
    )
288
1
"""Lazy import structure for the BARTpho tokenizer (Transformers init pattern)."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available

# Maps submodule name -> list of public names; filled in only when the optional
# sentencepiece dependency is available.
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bartpho"] = ["BartphoTokenizer"]

if TYPE_CHECKING:
    # Real imports for static type checkers only.
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bartpho import BartphoTokenizer
else:
    import sys

    # Replace this module with a lazy proxy so submodules load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
288
"""simple docstring""" from __future__ import annotations from bisect import bisect_left from functools import total_ordering from heapq import merge @total_ordering class lowerCAmelCase__ ( A_ ): def __lt__( self : Any , _lowerCamelCase : int ): return self[-1] < other[-1] def __eq__( self : int , _lowerCamelCase : Optional[Any] ): return self[-1] == other[-1] def _UpperCAmelCase ( __lowerCamelCase : list ) -> list: _snake_case = [] # sort into stacks for element in collection: _snake_case = Stack([element] ) _snake_case = bisect_left(__lowerCamelCase , __lowerCamelCase ) if i != len(__lowerCamelCase ): stacks[i].append(__lowerCamelCase ) else: stacks.append(__lowerCamelCase ) # use a heap-based merge to merge stack efficiently _snake_case = merge(*(reversed(__lowerCamelCase ) for stack in stacks) ) return collection if __name__ == "__main__": UpperCAmelCase__ = input('Enter numbers separated by a comma:\n').strip() UpperCAmelCase__ = [int(item) for item in user_input.split(',')] print(patience_sort(unsorted))
288
1
"""Deprecated SageMaker-specific TrainingArguments with model/data parallel setup."""
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field

import torch

from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging

logger = logging.get_logger(__name__)


def is_sagemaker_model_parallel_available():
    """Return True if SageMaker model parallelism is configured and installed.

    Checks the SageMaker environment variables for mp parameters and MPI
    options, then verifies the `smdistributed` package is importable.
    """
    # Get the sagemaker specific mp parameters from smp_options variable.
    smp_options = os.getenv("SM_HP_MP_PARAMETERS", "{}")
    try:
        # Parse it and check the field "partitions" is included, it is required for model parallel.
        smp_options = json.loads(smp_options)
        if "partitions" not in smp_options:
            return False
    except json.JSONDecodeError:
        return False

    # Get the sagemaker specific framework parameters from mpi_options variable.
    mpi_options = os.getenv("SM_FRAMEWORK_PARAMS", "{}")
    try:
        # Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
        mpi_options = json.loads(mpi_options)
        if not mpi_options.get("sagemaker_mpi_enabled", False):
            return False
    except json.JSONDecodeError:
        return False

    # Lastly, check if the `smdistributed` module is present.
    return importlib.util.find_spec("smdistributed") is not None


if is_sagemaker_model_parallel_available():
    import smdistributed.modelparallel.torch as smp

    smp.init()


@dataclass
class SageMakerTrainingArguments(TrainingArguments):
    # Opaque string forwarded by the SageMaker launcher; unused here.
    mp_parameters: str = field(
        default="",
        metadata={"help": "Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"},
    )

    def __post_init__(self):
        super().__post_init__()
        warnings.warn(
            "`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use "
            "`TrainingArguments` instead.",
            FutureWarning,
        )

    @cached_property
    def _setup_devices(self) -> "torch.device":
        """Pick the torch device and initialize distributed state for SageMaker."""
        logger.info("PyTorch: setting up devices")
        if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
            logger.warning(
                "torch.distributed process group is initialized, but local_rank == -1. "
                "In order to use Torch DDP, launch your script with `python -m torch.distributed.launch"
            )
        if self.no_cuda:
            device = torch.device("cpu")
            self._n_gpu = 0
        elif is_sagemaker_model_parallel_available():
            local_rank = smp.local_rank()
            device = torch.device("cuda", local_rank)
            self._n_gpu = 1
        elif is_sagemaker_dp_enabled():
            import smdistributed.dataparallel.torch.torch_smddp  # noqa: F401

            torch.distributed.init_process_group(backend="smddp", timeout=self.ddp_timeout_delta)
            self.local_rank = int(os.getenv("SMDATAPARALLEL_LOCAL_RANK"))
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1
        elif self.local_rank == -1:
            # if n_gpu is > 1 we'll use nn.DataParallel.
            # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
            # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
            # trigger an error that a device index is missing. Index 0 takes into account the
            # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
            # will use the first GPU in that env, i.e. GPU#1
            device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
            # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
            # the default value.
            self._n_gpu = torch.cuda.device_count()
        else:
            # Here, we'll use torch.distributed.
            # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
            if not torch.distributed.is_initialized():
                torch.distributed.init_process_group(backend="nccl", timeout=self.ddp_timeout_delta)
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1

        if device.type == "cuda":
            torch.cuda.set_device(device)

        return device

    @property
    def world_size(self):
        """Total number of data-parallel workers."""
        if is_sagemaker_model_parallel_available():
            return smp.dp_size()
        return super().world_size

    @property
    def place_model_on_device(self):
        # Model parallel places shards itself; don't move the model.
        return not is_sagemaker_model_parallel_available()

    @property
    def _no_sync_in_gradient_accumulation(self):
        return False
288
"""Pinned dependency table: maps a package name to its pip requirement spec."""
UpperCAmelCase__ = {
    "Pillow": "Pillow",
    "accelerate": "accelerate>=0.11.0",
    "compel": "compel==0.1.8",
    "black": "black~=23.1",
    "datasets": "datasets",
    "filelock": "filelock",
    "flax": "flax>=0.4.1",
    "hf-doc-builder": "hf-doc-builder>=0.3.0",
    "huggingface-hub": "huggingface-hub>=0.13.2",
    "requests-mock": "requests-mock==1.10.0",
    "importlib_metadata": "importlib_metadata",
    "invisible-watermark": "invisible-watermark",
    "isort": "isort>=5.5.4",
    "jax": "jax>=0.2.8,!=0.3.2",
    "jaxlib": "jaxlib>=0.1.65",
    "Jinja2": "Jinja2",
    "k-diffusion": "k-diffusion>=0.0.12",
    "torchsde": "torchsde",
    "note_seq": "note_seq",
    "librosa": "librosa",
    "numpy": "numpy",
    "omegaconf": "omegaconf",
    "parameterized": "parameterized",
    "protobuf": "protobuf>=3.20.3,<4",
    "pytest": "pytest",
    "pytest-timeout": "pytest-timeout",
    "pytest-xdist": "pytest-xdist",
    "ruff": "ruff>=0.0.241",
    "safetensors": "safetensors",
    "sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
    "scipy": "scipy",
    "onnx": "onnx",
    "regex": "regex!=2019.12.17",
    "requests": "requests",
    "tensorboard": "tensorboard",
    "torch": "torch>=1.4",
    "torchvision": "torchvision",
    "transformers": "transformers>=4.25.1",
    "urllib3": "urllib3<=2.0.0",
}
288
1
"""Convert original fairseq SpeechT5 checkpoints into the HF Transformers format."""
import argparse

import torch

from transformers import (
    SpeechT5Config,
    SpeechT5FeatureExtractor,
    SpeechT5ForSpeechToSpeech,
    SpeechT5ForSpeechToText,
    SpeechT5ForTextToSpeech,
    SpeechT5Processor,
    SpeechT5Tokenizer,
    logging,
)
from transformers.tokenization_utils import AddedToken

logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")

# fairseq key prefix -> HF key prefix, one table per sub-network.
MAPPING_SPEECH_ENCODER_PRENET = {
    "speech_encoder_prenet.layer_norm": "speecht5.encoder.prenet.feature_projection.layer_norm",
    "speech_encoder_prenet.post_extract_proj": "speecht5.encoder.prenet.feature_projection.projection",
    "speech_encoder_prenet.pos_conv.0": "speecht5.encoder.prenet.pos_conv_embed.conv",
    "speech_encoder_prenet.mask_emb": "speecht5.encoder.prenet.masked_spec_embed",
}
MAPPING_TEXT_ENCODER_PRENET = {
    "text_encoder_prenet.encoder_prenet.0": "speecht5.encoder.prenet.embed_tokens",
    "text_encoder_prenet.encoder_prenet.1.alpha": "speecht5.encoder.prenet.encode_positions.alpha",
}
MAPPING_SPEECH_DECODER_PRENET = {
    "speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0": "speecht5.decoder.prenet.layers.0",
    "speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0": "speecht5.decoder.prenet.layers.1",
    "speech_decoder_prenet.decoder_prenet.0.1": "speecht5.decoder.prenet.final_layer",
    "speech_decoder_prenet.decoder_prenet.1.alpha": "speecht5.decoder.prenet.encode_positions.alpha",
    "speech_decoder_prenet.spkembs_layer.0": "speecht5.decoder.prenet.speaker_embeds_layer",
}
MAPPING_SPEECH_DECODER_POSTNET = {
    "speech_decoder_postnet.feat_out": "speech_decoder_postnet.feat_out",
    "speech_decoder_postnet.prob_out": "speech_decoder_postnet.prob_out",
    "speech_decoder_postnet.postnet.postnet.0.0": "speech_decoder_postnet.layers.0.conv",
    "speech_decoder_postnet.postnet.postnet.0.1": "speech_decoder_postnet.layers.0.batch_norm",
    "speech_decoder_postnet.postnet.postnet.1.0": "speech_decoder_postnet.layers.1.conv",
    "speech_decoder_postnet.postnet.postnet.1.1": "speech_decoder_postnet.layers.1.batch_norm",
    "speech_decoder_postnet.postnet.postnet.2.0": "speech_decoder_postnet.layers.2.conv",
    "speech_decoder_postnet.postnet.postnet.2.1": "speech_decoder_postnet.layers.2.batch_norm",
    "speech_decoder_postnet.postnet.postnet.3.0": "speech_decoder_postnet.layers.3.conv",
    "speech_decoder_postnet.postnet.postnet.3.1": "speech_decoder_postnet.layers.3.batch_norm",
    "speech_decoder_postnet.postnet.postnet.4.0": "speech_decoder_postnet.layers.4.conv",
    "speech_decoder_postnet.postnet.postnet.4.1": "speech_decoder_postnet.layers.4.batch_norm",
}
MAPPING_TEXT_DECODER_PRENET = {
    "text_decoder_prenet.embed_tokens": "speecht5.decoder.prenet.embed_tokens",
}
MAPPING_TEXT_DECODER_POSTNET = {
    "text_decoder_postnet.output_projection": "text_decoder_postnet.lm_head",
}
MAPPING_ENCODER = {
    "encoder.layers.*.self_attn.k_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj",
    "encoder.layers.*.self_attn.v_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj",
    "encoder.layers.*.self_attn.q_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj",
    "encoder.layers.*.self_attn.out_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj",
    "encoder.layers.*.self_attn_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.layer_norm",
    "encoder.layers.*.fc1": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense",
    "encoder.layers.*.fc2": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense",
    "encoder.layers.*.final_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "speecht5.encoder.wrapped_encoder.layer_norm",
    "encoder.pos_emb.pe_k": "speecht5.encoder.wrapped_encoder.embed_positions.pe_k",
}
MAPPING_DECODER = {
    "decoder.layers.*.self_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj",
    "decoder.layers.*.self_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj",
    "decoder.layers.*.self_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj",
    "decoder.layers.*.self_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj",
    "decoder.layers.*.self_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm",
    "decoder.layers.*.encoder_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj",
    "decoder.layers.*.encoder_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj",
    "decoder.layers.*.encoder_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj",
    "decoder.layers.*.encoder_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj",
    "decoder.layers.*.encoder_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm",
    "decoder.layers.*.fc1": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense",
    "decoder.layers.*.fc2": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense",
    "decoder.layers.*.final_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm",
}
MAPPING_S2T = {
    **MAPPING_SPEECH_ENCODER_PRENET,
    **MAPPING_ENCODER,
    **MAPPING_DECODER,
    **MAPPING_TEXT_DECODER_PRENET,
    **MAPPING_TEXT_DECODER_POSTNET,
}
MAPPING_T2S = {
    **MAPPING_TEXT_ENCODER_PRENET,
    **MAPPING_ENCODER,
    **MAPPING_DECODER,
    **MAPPING_SPEECH_DECODER_PRENET,
    **MAPPING_SPEECH_DECODER_POSTNET,
}
MAPPING_S2S = {
    **MAPPING_SPEECH_ENCODER_PRENET,
    **MAPPING_ENCODER,
    **MAPPING_DECODER,
    **MAPPING_SPEECH_DECODER_PRENET,
    **MAPPING_SPEECH_DECODER_POSTNET,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = [
    "encoder.version",
    "encoder.layers.*.norm_k.weight",
    "encoder.layers.*.norm_k.bias",
    "decoder.version",
    "decoder.layers.*.norm_k.weight",
    "decoder.layers.*.norm_k.bias",
    "decoder.pos_emb.pe_k",
    "speech_encoder_prenet.embed_positions._float_tensor",
    "text_decoder_prenet.embed_positions._float_tensor",
]
IGNORE_KEYS_S2T = IGNORE_KEYS + [
    "encoder.proj",
    "text_encoder_prenet.*",
    "speech_decoder_prenet.*",
    "speech_decoder_postnet.*",
]
IGNORE_KEYS_T2S = IGNORE_KEYS + [
    "encoder.proj",
    "speech_encoder_prenet.*",
    "text_decoder_prenet.*",
    "text_decoder_postnet.*",
]
IGNORE_KEYS_S2S = IGNORE_KEYS + [
    "encoder.proj",
    "text_encoder_prenet.*",
    "text_decoder_prenet.*",
    "text_decoder_postnet.*",
]


def set_recursively(hf_model, key, value, full_name, weight_type):
    """Copy `value` into the nested attribute of `hf_model` addressed by dotted `key`.

    `weight_type` selects which tensor of the target module to overwrite
    ("weight", "bias", "running_mean", ...); None writes the target directly.
    Raises ValueError on shape mismatch.
    """
    hf_pointer = hf_model
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.")


def should_ignore(name, ignore_keys):
    """Return True when fairseq key `name` matches an ignore pattern.

    Patterns support a trailing ".*" (prefix match) and an interior ".*."
    (prefix + suffix containment); anything else is substring containment.
    """
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False


def recursively_load_weights(fairseq_dict, hf_model, task):
    """Copy every fairseq tensor into the HF model, logging ignored/unused keys."""
    unused_weights = []

    if task == "s2t":
        feature_encoder = hf_model.speecht5.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2T
        IGNORE_KEYS = IGNORE_KEYS_S2T
    elif task == "t2s":
        feature_encoder = None
        MAPPING = MAPPING_T2S
        IGNORE_KEYS = IGNORE_KEYS_T2S
    elif task == "s2s":
        feature_encoder = hf_model.speecht5.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2S
        IGNORE_KEYS = IGNORE_KEYS_S2S
    else:
        raise ValueError(f"Unsupported task: {task}")

    for name, value in fairseq_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue

        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_encoder,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if "*" in key:
                    prefix, suffix = key.split(".*.")
                    if prefix in name and suffix in name:
                        key = suffix

                # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                if key in name:
                    is_used = True
                    if "*" in mapped_key:
                        # Recover the layer index from the fairseq name.
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Copy one feature-encoder conv/norm tensor into `feature_extractor`."""
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


@torch.no_grad()
def convert_speecht5_checkpoint(
    task,
    checkpoint_path,
    pytorch_dump_folder_path,
    config_path=None,
    vocab_path=None,
    repo_id=None,
):
    """Convert a fairseq SpeechT5 checkpoint to HF format and save (optionally push).

    Args:
        task: "s2t", "t2s" or "s2s".
        checkpoint_path: path to the fairseq checkpoint.
        pytorch_dump_folder_path: output directory for model + processor.
        config_path: optional HF config.json to load instead of defaults.
        vocab_path: optional SentencePiece model for the tokenizer.
        repo_id: optional Hub repo to push the converted artifacts to.
    """
    if config_path is not None:
        config = SpeechT5Config.from_pretrained(config_path)
    else:
        config = SpeechT5Config()

    if task == "s2t":
        config.max_length = config.max_text_positions
        model = SpeechT5ForSpeechToText(config)
    elif task == "t2s":
        config.max_speech_positions = 1876
        config.max_text_positions = 600
        config.max_length = config.max_speech_positions
        model = SpeechT5ForTextToSpeech(config)
    elif task == "s2s":
        config.max_speech_positions = 1876
        config.max_length = config.max_speech_positions
        model = SpeechT5ForSpeechToSpeech(config)
    else:
        raise ValueError(f"Unknown task name: {task}")

    # NOTE(review): as in the original, `tokenizer` is only defined when
    # vocab_path is given; SpeechT5Processor below would fail otherwise.
    if vocab_path:
        tokenizer = SpeechT5Tokenizer(vocab_path, model_max_length=config.max_text_positions)

        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])

    feature_extractor = SpeechT5FeatureExtractor()
    processor = SpeechT5Processor(tokenizer=tokenizer, feature_extractor=feature_extractor)
    processor.save_pretrained(pytorch_dump_folder_path)

    fairseq_checkpoint = torch.load(checkpoint_path)
    recursively_load_weights(fairseq_checkpoint["model"], model, task)

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--task",
        default="s2t",
        type=str,
        help="Type of the SpeechT5 model you'd like to convert. Should be one of 's2t', 't2s', 's2s'.",
    )
    parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--vocab_path", default=None, type=str, help="Path to SentencePiece model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )
    args = parser.parse_args()
    convert_speecht5_checkpoint(
        args.task,
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.vocab_path,
        args.push_to_hub,
    )
288
"""simple docstring""" from __future__ import annotations import inspect import unittest import numpy as np from transformers import ResNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFResNetForImageClassification, TFResNetModel from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class lowerCAmelCase__ : def __init__( self : Dict , _lowerCamelCase : int , _lowerCamelCase : Optional[int]=3 , _lowerCamelCase : List[str]=32 , _lowerCamelCase : Optional[int]=3 , _lowerCamelCase : Dict=10 , _lowerCamelCase : Tuple=[10, 20, 30, 40] , _lowerCamelCase : int=[1, 1, 2, 1] , _lowerCamelCase : int=True , _lowerCamelCase : Optional[int]=True , _lowerCamelCase : Optional[int]="relu" , _lowerCamelCase : List[Any]=3 , _lowerCamelCase : Dict=None , ): _snake_case = parent _snake_case = batch_size _snake_case = image_size _snake_case = num_channels _snake_case = embeddings_size _snake_case = hidden_sizes _snake_case = depths _snake_case = is_training _snake_case = use_labels _snake_case = hidden_act _snake_case = num_labels _snake_case = scope _snake_case = len(_lowerCamelCase ) def lowercase ( self : Optional[int] ): _snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _snake_case = None if self.use_labels: _snake_case = ids_tensor([self.batch_size] , self.num_labels ) _snake_case = self.get_config() return config, pixel_values, labels def lowercase ( self : Tuple ): return ResNetConfig( num_channels=self.num_channels , 
embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , ) def lowercase ( self : List[Any] , _lowerCamelCase : List[str] , _lowerCamelCase : str , _lowerCamelCase : List[Any] ): _snake_case = TFResNetModel(config=_lowerCamelCase ) _snake_case = model(_lowerCamelCase ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def lowercase ( self : Dict , _lowerCamelCase : str , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Tuple ): _snake_case = self.num_labels _snake_case = TFResNetForImageClassification(_lowerCamelCase ) _snake_case = model(_lowerCamelCase , labels=_lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowercase ( self : Tuple ): _snake_case = self.prepare_config_and_inputs() _snake_case , _snake_case , _snake_case = config_and_inputs _snake_case = {'''pixel_values''': pixel_values} return config, inputs_dict @require_tf class lowerCAmelCase__ ( A_ , A_ , unittest.TestCase ): __a = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else () __a = ( {"""feature-extraction""": TFResNetModel, """image-classification""": TFResNetForImageClassification} if is_tf_available() else {} ) __a = False __a = False __a = False __a = False __a = False def lowercase ( self : List[Any] ): _snake_case = TFResNetModelTester(self ) _snake_case = ConfigTester(self , config_class=_lowerCamelCase , has_text_modality=_lowerCamelCase ) def lowercase ( self : Tuple ): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() 
self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def lowercase ( self : List[Any] ): return @unittest.skip(reason='''ResNet does not use inputs_embeds''' ) def lowercase ( self : Any ): pass @unittest.skip(reason='''ResNet does not support input and output embeddings''' ) def lowercase ( self : List[str] ): pass def lowercase ( self : int ): _snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _snake_case = model_class(_lowerCamelCase ) _snake_case = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _snake_case = [*signature.parameters.keys()] _snake_case = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , _lowerCamelCase ) def lowercase ( self : List[str] ): _snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowerCamelCase ) def lowercase ( self : Union[str, Any] ): def check_hidden_states_output(_lowerCamelCase : int , _lowerCamelCase : List[Any] , _lowerCamelCase : str ): _snake_case = model_class(_lowerCamelCase ) _snake_case = model(**self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) ) _snake_case = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states _snake_case = self.model_tester.num_stages self.assertEqual(len(_lowerCamelCase ) , expected_num_stages + 1 ) # ResNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) _snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common() _snake_case = ['''basic''', '''bottleneck'''] for model_class in self.all_model_classes: for layer_type in layers_type: _snake_case = layer_type _snake_case = 
True check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _snake_case = True check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) def lowercase ( self : Union[str, Any] ): _snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_lowerCamelCase ) @slow def lowercase ( self : List[str] ): for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _snake_case = TFResNetModel.from_pretrained(_lowerCamelCase ) self.assertIsNotNone(_lowerCamelCase ) def _UpperCAmelCase ( ) -> Union[str, Any]: _snake_case = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_tf @require_vision class lowerCAmelCase__ ( unittest.TestCase ): @cached_property def lowercase ( self : Dict ): return ( AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def lowercase ( self : List[Any] ): _snake_case = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) _snake_case = self.default_image_processor _snake_case = prepare_img() _snake_case = image_processor(images=_lowerCamelCase , return_tensors='''tf''' ) # forward pass _snake_case = model(**_lowerCamelCase ) # verify the logits _snake_case = tf.TensorShape((1, 1000) ) self.assertEqual(outputs.logits.shape , _lowerCamelCase ) _snake_case = tf.constant([-1_1.1_0_6_9, -9.7_8_7_7, -8.3_7_7_7] ) self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , _lowerCamelCase , atol=1e-4 ) )
288
1
"""simple docstring""" UpperCAmelCase__ = 256 # Modulus to hash a string UpperCAmelCase__ = 1000003 def _UpperCAmelCase ( __lowerCamelCase : str , __lowerCamelCase : str ) -> bool: _snake_case = len(__lowerCamelCase ) _snake_case = len(__lowerCamelCase ) if p_len > t_len: return False _snake_case = 0 _snake_case = 0 _snake_case = 1 # Calculating the hash of pattern and substring of text for i in range(__lowerCamelCase ): _snake_case = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus _snake_case = (ord(text[i] ) + text_hash * alphabet_size) % modulus if i == p_len - 1: continue _snake_case = (modulus_power * alphabet_size) % modulus for i in range(0 , t_len - p_len + 1 ): if text_hash == p_hash and text[i : i + p_len] == pattern: return True if i == t_len - p_len: continue # Calculate the https://en.wikipedia.org/wiki/Rolling_hash _snake_case = ( (text_hash - ord(text[i] ) * modulus_power) * alphabet_size + ord(text[i + p_len] ) ) % modulus return False def _UpperCAmelCase ( ) -> None: _snake_case = '''abc1abc12''' _snake_case = '''alskfjaldsabc1abc1abc12k23adsfabcabc''' _snake_case = '''alskfjaldsk23adsfabcabc''' assert rabin_karp(__lowerCamelCase , __lowerCamelCase ) and not rabin_karp(__lowerCamelCase , __lowerCamelCase ) # Test 2) _snake_case = '''ABABX''' _snake_case = '''ABABZABABYABABX''' assert rabin_karp(__lowerCamelCase , __lowerCamelCase ) # Test 3) _snake_case = '''AAAB''' _snake_case = '''ABAAAAAB''' assert rabin_karp(__lowerCamelCase , __lowerCamelCase ) # Test 4) _snake_case = '''abcdabcy''' _snake_case = '''abcxabcdabxabcdabcdabcy''' assert rabin_karp(__lowerCamelCase , __lowerCamelCase ) # Test 5) _snake_case = '''Lü''' _snake_case = '''Lüsai''' assert rabin_karp(__lowerCamelCase , __lowerCamelCase ) _snake_case = '''Lue''' assert not rabin_karp(__lowerCamelCase , __lowerCamelCase ) print('''Success.''' ) if __name__ == "__main__": test_rabin_karp()
288
"""simple docstring""" # This script creates a super tiny model that is useful inside tests, when we just want to test that # the machinery works, without needing to the check the quality of the outcomes. # # This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny - # all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and # emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files. # The latter is done by `fsmt-make-super-tiny-model.py`. # # It will be used then as "stas/tiny-wmt19-en-ru" from pathlib import Path import json import tempfile from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES UpperCAmelCase__ = 'tiny-wmt19-en-ru' # Build # borrowed from a test UpperCAmelCase__ = [ 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'w</w>', 'r</w>', 't</w>', 'lo', 'low', 'er</w>', 'low</w>', 'lowest</w>', 'newer</w>', 'wider</w>', '<unk>', ] UpperCAmelCase__ = dict(zip(vocab, range(len(vocab)))) UpperCAmelCase__ = ['l o 123', 'lo w 1456', 'e r</w> 1789', ''] with tempfile.TemporaryDirectory() as tmpdirname: UpperCAmelCase__ = Path(tmpdirname) UpperCAmelCase__ = build_dir / VOCAB_FILES_NAMES['src_vocab_file'] UpperCAmelCase__ = build_dir / VOCAB_FILES_NAMES['tgt_vocab_file'] UpperCAmelCase__ = build_dir / VOCAB_FILES_NAMES['merges_file'] with open(src_vocab_file, 'w') as fp: fp.write(json.dumps(vocab_tokens)) with open(tgt_vocab_file, 'w') as fp: fp.write(json.dumps(vocab_tokens)) with open(merges_file, 'w') as fp: fp.write('\n'.join(merges)) UpperCAmelCase__ = FSMTTokenizer( langs=['en', 'ru'], src_vocab_size=len(vocab), tgt_vocab_size=len(vocab), src_vocab_file=src_vocab_file, tgt_vocab_file=tgt_vocab_file, merges_file=merges_file, ) UpperCAmelCase__ = FSMTConfig( langs=['ru', 'en'], src_vocab_size=1000, tgt_vocab_size=1000, d_model=4, 
encoder_layers=1, decoder_layers=1, encoder_ffn_dim=4, decoder_ffn_dim=4, encoder_attention_heads=1, decoder_attention_heads=1, ) UpperCAmelCase__ = FSMTForConditionalGeneration(config) print(F"num of params {tiny_model.num_parameters()}") # Test UpperCAmelCase__ = tokenizer(['Making tiny model'], return_tensors='pt') UpperCAmelCase__ = tiny_model(**batch) print('test output:', len(outputs.logits[0])) # Save tiny_model.half() # makes it smaller tiny_model.save_pretrained(mname_tiny) tokenizer.save_pretrained(mname_tiny) print(F"Generated {mname_tiny}") # Upload # transformers-cli upload tiny-wmt19-en-ru
288
1
"""simple docstring""" def _UpperCAmelCase ( __lowerCamelCase : Union[str, Any]=2_81_23 ) -> List[str]: _snake_case = [1] * (limit + 1) for i in range(2 , int(limit**0.5 ) + 1 ): sum_divs[i * i] += i for k in range(i + 1 , limit // i + 1 ): sum_divs[k * i] += k + i _snake_case = set() _snake_case = 0 for n in range(1 , limit + 1 ): if sum_divs[n] > n: abundants.add(__lowerCamelCase ) if not any((n - a in abundants) for a in abundants ): res += n return res if __name__ == "__main__": print(solution())
288
"""simple docstring""" def _UpperCAmelCase ( __lowerCamelCase : int = 1_00_00_00 ) -> int: _snake_case = limit + 1 _snake_case = [0] * limit for first_term in range(1 , __lowerCamelCase ): for n in range(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ): _snake_case = first_term + n / first_term if common_difference % 4: # d must be divisble by 4 continue else: common_difference /= 4 if ( first_term > common_difference and first_term < 4 * common_difference ): # since x,y,z are positive integers frequency[n] += 1 # so z>0 and a>d ,also 4d<a _snake_case = sum(1 for x in frequency[1:limit] if x == 10 ) return count if __name__ == "__main__": print(F"{solution() = }")
288
1
"""simple docstring""" UpperCAmelCase__ = { 'Pillow': 'Pillow', 'accelerate': 'accelerate>=0.11.0', 'compel': 'compel==0.1.8', 'black': 'black~=23.1', 'datasets': 'datasets', 'filelock': 'filelock', 'flax': 'flax>=0.4.1', 'hf-doc-builder': 'hf-doc-builder>=0.3.0', 'huggingface-hub': 'huggingface-hub>=0.13.2', 'requests-mock': 'requests-mock==1.10.0', 'importlib_metadata': 'importlib_metadata', 'invisible-watermark': 'invisible-watermark', 'isort': 'isort>=5.5.4', 'jax': 'jax>=0.2.8,!=0.3.2', 'jaxlib': 'jaxlib>=0.1.65', 'Jinja2': 'Jinja2', 'k-diffusion': 'k-diffusion>=0.0.12', 'torchsde': 'torchsde', 'note_seq': 'note_seq', 'librosa': 'librosa', 'numpy': 'numpy', 'omegaconf': 'omegaconf', 'parameterized': 'parameterized', 'protobuf': 'protobuf>=3.20.3,<4', 'pytest': 'pytest', 'pytest-timeout': 'pytest-timeout', 'pytest-xdist': 'pytest-xdist', 'ruff': 'ruff>=0.0.241', 'safetensors': 'safetensors', 'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92', 'scipy': 'scipy', 'onnx': 'onnx', 'regex': 'regex!=2019.12.17', 'requests': 'requests', 'tensorboard': 'tensorboard', 'torch': 'torch>=1.4', 'torchvision': 'torchvision', 'transformers': 'transformers>=4.25.1', 'urllib3': 'urllib3<=2.0.0', }
288
"""simple docstring""" import math import numpy as np import qiskit from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute def _UpperCAmelCase ( __lowerCamelCase : int = 3 ) -> qiskit.result.counts.Counts: if isinstance(__lowerCamelCase , __lowerCamelCase ): raise TypeError('''number of qubits must be a integer.''' ) if number_of_qubits <= 0: raise ValueError('''number of qubits must be > 0.''' ) if math.floor(__lowerCamelCase ) != number_of_qubits: raise ValueError('''number of qubits must be exact integer.''' ) if number_of_qubits > 10: raise ValueError('''number of qubits too large to simulate(>10).''' ) _snake_case = QuantumRegister(__lowerCamelCase , '''qr''' ) _snake_case = ClassicalRegister(__lowerCamelCase , '''cr''' ) _snake_case = QuantumCircuit(__lowerCamelCase , __lowerCamelCase ) _snake_case = number_of_qubits for i in range(__lowerCamelCase ): quantum_circuit.h(number_of_qubits - i - 1 ) counter -= 1 for j in range(__lowerCamelCase ): quantum_circuit.cp(np.pi / 2 ** (counter - j) , __lowerCamelCase , __lowerCamelCase ) for k in range(number_of_qubits // 2 ): quantum_circuit.swap(__lowerCamelCase , number_of_qubits - k - 1 ) # measure all the qubits quantum_circuit.measure(__lowerCamelCase , __lowerCamelCase ) # simulate with 10000 shots _snake_case = Aer.get_backend('''qasm_simulator''' ) _snake_case = execute(__lowerCamelCase , __lowerCamelCase , shots=1_00_00 ) return job.result().get_counts(__lowerCamelCase ) if __name__ == "__main__": print( F"Total count for quantum fourier transform state is: \ {quantum_fourier_transform(3)}" )
288
1
"""simple docstring""" import json import logging import os import sys from time import time from unittest.mock import patch from transformers.testing_utils import TestCasePlus, require_torch_tpu logging.basicConfig(level=logging.DEBUG) UpperCAmelCase__ = logging.getLogger() def _UpperCAmelCase ( __lowerCamelCase : List[Any] ) -> Any: _snake_case = {} _snake_case = os.path.join(__lowerCamelCase , '''all_results.json''' ) if os.path.exists(__lowerCamelCase ): with open(__lowerCamelCase , '''r''' ) as f: _snake_case = json.load(__lowerCamelCase ) else: raise ValueError(f'''can\'t find {path}''' ) return results UpperCAmelCase__ = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) @require_torch_tpu class lowerCAmelCase__ ( A_ ): def lowercase ( self : str ): import xla_spawn _snake_case = self.get_auto_remove_tmp_dir() _snake_case = f''' ./examples/pytorch/text-classification/run_glue.py --num_cores=8 ./examples/pytorch/text-classification/run_glue.py --model_name_or_path distilbert-base-uncased --output_dir {tmp_dir} --overwrite_output_dir --train_file ./tests/fixtures/tests_samples/MRPC/train.csv --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv --do_train --do_eval --debug tpu_metrics_debug --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --learning_rate=1e-4 --max_steps=10 --warmup_steps=2 --seed=42 --max_seq_length=128 '''.split() with patch.object(_lowerCamelCase , '''argv''' , _lowerCamelCase ): _snake_case = time() xla_spawn.main() _snake_case = time() _snake_case = get_results(_lowerCamelCase ) self.assertGreaterEqual(result['''eval_accuracy'''] , 0.7_5 ) # Assert that the script takes less than 500 seconds to make sure it doesn't hang. self.assertLess(end - start , 500 ) def lowercase ( self : Optional[int] ): import xla_spawn _snake_case = ''' ./tests/test_trainer_tpu.py --num_cores=8 ./tests/test_trainer_tpu.py '''.split() with patch.object(_lowerCamelCase , '''argv''' , _lowerCamelCase ): xla_spawn.main()
288
"""simple docstring""" import argparse import os from pathlib import Path import fairseq import torch from packaging import version from torch import nn from transformers import ( BartConfig, BartForConditionalGeneration, BartForSequenceClassification, BartModel, BartTokenizer, ) from transformers.utils import logging UpperCAmelCase__ = ['bart.large', 'bart.large.mnli', 'bart.large.cnn', 'bart_xsum/model.pt'] UpperCAmelCase__ = {'bart.large': BartModel, 'bart.large.mnli': BartForSequenceClassification} if version.parse(fairseq.__version__) < version.parse('0.9.0'): raise Exception('requires fairseq >= 0.9.0') logging.set_verbosity_info() UpperCAmelCase__ = logging.get_logger(__name__) UpperCAmelCase__ = ' Hello world! cécé herlolip' UpperCAmelCase__ = [ ('model.classification_heads.mnli.dense.weight', 'classification_head.dense.weight'), ('model.classification_heads.mnli.dense.bias', 'classification_head.dense.bias'), ('model.classification_heads.mnli.out_proj.weight', 'classification_head.out_proj.weight'), ('model.classification_heads.mnli.out_proj.bias', 'classification_head.out_proj.bias'), ] def _UpperCAmelCase ( __lowerCamelCase : Optional[Any] ) -> Optional[int]: _snake_case = [ '''encoder.version''', '''decoder.version''', '''model.encoder.version''', '''model.decoder.version''', '''_float_tensor''', ] for k in ignore_keys: state_dict.pop(__lowerCamelCase , __lowerCamelCase ) def _UpperCAmelCase ( __lowerCamelCase : List[Any] , __lowerCamelCase : int , __lowerCamelCase : int ) -> int: _snake_case = dct.pop(__lowerCamelCase ) _snake_case = val def _UpperCAmelCase ( __lowerCamelCase : Dict ) -> str: _snake_case = torch.load(__lowerCamelCase , map_location='''cpu''' ) _snake_case = torch.hub.load('''pytorch/fairseq''' , '''bart.large.cnn''' ).eval() hub_interface.model.load_state_dict(sd['''model'''] ) return hub_interface def _UpperCAmelCase ( __lowerCamelCase : Optional[int] ) -> Union[str, Any]: _snake_case , _snake_case = emb.weight.shape _snake_case = 
nn.Linear(__lowerCamelCase , __lowerCamelCase , bias=__lowerCamelCase ) _snake_case = emb.weight.data return lin_layer @torch.no_grad() def _UpperCAmelCase ( __lowerCamelCase : List[Any] , __lowerCamelCase : Tuple , __lowerCamelCase : Union[str, Any]=None ) -> List[Any]: if not os.path.exists(__lowerCamelCase ): _snake_case = torch.hub.load('''pytorch/fairseq''' , __lowerCamelCase ).eval() else: _snake_case = load_xsum_checkpoint(__lowerCamelCase ) bart.model.upgrade_state_dict(bart.model.state_dict() ) if hf_checkpoint_name is None: _snake_case = checkpoint_path.replace('''.''' , '''-''' ) _snake_case = BartConfig.from_pretrained(__lowerCamelCase ) _snake_case = bart.encode(__lowerCamelCase ).unsqueeze(0 ) _snake_case = BartTokenizer.from_pretrained(__lowerCamelCase ).encode(__lowerCamelCase , return_tensors='''pt''' ).unsqueeze(0 ) if not torch.eq(__lowerCamelCase , __lowerCamelCase ).all(): raise ValueError( f'''converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokensa}''' ) if checkpoint_path == "bart.large.mnli": _snake_case = bart.state_dict() remove_ignore_keys_(__lowerCamelCase ) _snake_case = state_dict['''model.decoder.embed_tokens.weight'''] for src, dest in mnli_rename_keys: rename_key(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) _snake_case = BartForSequenceClassification(__lowerCamelCase ).eval() model.load_state_dict(__lowerCamelCase ) _snake_case = bart.predict('''mnli''' , __lowerCamelCase , return_logits=__lowerCamelCase ) _snake_case = model(__lowerCamelCase )[0] # logits else: # no classification heads to worry about _snake_case = bart.model.state_dict() remove_ignore_keys_(__lowerCamelCase ) _snake_case = state_dict['''decoder.embed_tokens.weight'''] _snake_case = bart.extract_features(__lowerCamelCase ) if hf_checkpoint_name == "facebook/bart-large": _snake_case = BartModel(__lowerCamelCase ).eval() model.load_state_dict(__lowerCamelCase ) _snake_case = model(__lowerCamelCase ).model[0] else: 
_snake_case = BartForConditionalGeneration(__lowerCamelCase ).eval() # an existing summarization ckpt model.model.load_state_dict(__lowerCamelCase ) if hasattr(__lowerCamelCase , '''lm_head''' ): _snake_case = make_linear_from_emb(model.model.shared ) _snake_case = model.model(__lowerCamelCase )[0] # Check results if fairseq_output.shape != new_model_outputs.shape: raise ValueError( f'''`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}''' ) if (fairseq_output != new_model_outputs).any().item(): raise ValueError('''Some values in `fairseq_output` are different from `new_model_outputs`''' ) Path(__lowerCamelCase ).mkdir(exist_ok=__lowerCamelCase ) model.save_pretrained(__lowerCamelCase ) if __name__ == "__main__": UpperCAmelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( 'fairseq_path', type=str, help='bart.large, bart.large.cnn or a path to a model.pt on local filesystem.' ) parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument( '--hf_config', default=None, type=str, help='Which huggingface architecture to use: bart-large-xsum' ) UpperCAmelCase__ = parser.parse_args() convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
288
1
"""simple docstring""" import logging import os import sys import warnings from dataclasses import dataclass, field from random import randint from typing import Optional import datasets import evaluate import numpy as np from datasets import DatasetDict, load_dataset import transformers from transformers import ( AutoConfig, AutoFeatureExtractor, AutoModelForAudioClassification, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version UpperCAmelCase__ = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version('4.31.0') require_version('datasets>=1.14.0', 'To fix: pip install -r examples/pytorch/audio-classification/requirements.txt') def _UpperCAmelCase ( __lowerCamelCase : np.ndarray , __lowerCamelCase : float , __lowerCamelCase : int = 1_60_00 ) -> Dict: _snake_case = int(round(sample_rate * max_length ) ) if len(__lowerCamelCase ) <= sample_length: return wav _snake_case = randint(0 , len(__lowerCamelCase ) - sample_length - 1 ) return wav[random_offset : random_offset + sample_length] @dataclass class lowerCAmelCase__ : __a = field(default=A_ , metadata={"""help""": """Name of a dataset from the datasets package"""} ) __a = field( default=A_ , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} ) __a = field( default=A_ , metadata={"""help""": """A file containing the training audio paths and labels."""} ) __a = field( default=A_ , metadata={"""help""": """A file containing the validation audio paths and labels."""} ) __a = field( default="""train""" , metadata={ """help""": """The name of the training data set split to use (via the datasets library). 
Defaults to 'train'""" } , ) __a = field( default="""validation""" , metadata={ """help""": ( """The name of the training data set split to use (via the datasets library). Defaults to 'validation'""" ) } , ) __a = field( default="""audio""" , metadata={"""help""": """The name of the dataset column containing the audio data. Defaults to 'audio'"""} , ) __a = field( default="""label""" , metadata={"""help""": """The name of the dataset column containing the labels. Defaults to 'label'"""} ) __a = field( default=A_ , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of training examples to this """ """value if set.""" ) } , ) __a = field( default=A_ , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of evaluation examples to this """ """value if set.""" ) } , ) __a = field( default=20 , metadata={"""help""": """Audio clips will be randomly cut to this length during training if the value is set."""} , ) @dataclass class lowerCAmelCase__ : __a = field( default="""facebook/wav2vec2-base""" , metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} , ) __a = field( default=A_ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} ) __a = field( default=A_ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from the Hub"""} ) __a = field( default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , ) __a = field( default=A_ , metadata={"""help""": """Name or path of preprocessor config."""} ) __a = field( default=A_ , metadata={"""help""": """Whether to freeze the feature encoder layers of the model."""} ) __a = field( default=A_ , metadata={"""help""": """Whether to generate an attention mask in the feature extractor."""} ) __a = field( default=A_ , metadata={ """help""": ( """Will use the token 
generated when running `huggingface-cli login` (necessary to use this script """ """with private models).""" ) } , ) __a = field( default=A_ , metadata={"""help""": """Whether to freeze the feature extractor layers of the model."""} ) __a = field( default=A_ , metadata={"""help""": """Will enable to load a pretrained model whose head dimensions are different."""} , ) def lowercase ( self : List[Any] ): if not self.freeze_feature_extractor and self.freeze_feature_encoder: warnings.warn( '''The argument `--freeze_feature_extractor` is deprecated and ''' '''will be removed in a future version. Use `--freeze_feature_encoder`''' '''instead. Setting `freeze_feature_encoder==True`.''' , _lowerCamelCase , ) if self.freeze_feature_extractor and not self.freeze_feature_encoder: raise ValueError( '''The argument `--freeze_feature_extractor` is deprecated and ''' '''should not be used in combination with `--freeze_feature_encoder`.''' '''Only make use of `--freeze_feature_encoder`.''' ) def _UpperCAmelCase ( ) -> Union[str, Any]: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. _snake_case = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. _snake_case , _snake_case , _snake_case = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: _snake_case , _snake_case , _snake_case = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. 
send_example_telemetry('''run_audio_classification''' , __lowerCamelCase , __lowerCamelCase ) # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() _snake_case = training_args.get_process_log_level() logger.setLevel(__lowerCamelCase ) transformers.utils.logging.set_verbosity(__lowerCamelCase ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} ''' + f'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' ) logger.info(f'''Training/evaluation parameters {training_args}''' ) # Set seed before initializing model. set_seed(training_args.seed ) # Detecting last checkpoint. _snake_case = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: _snake_case = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( f'''Output directory ({training_args.output_dir}) already exists and is not empty. ''' '''Use --overwrite_output_dir to train from scratch.''' ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ''' '''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' ) # Initialize our dataset and prepare it for the audio classification task. 
_snake_case = DatasetDict() _snake_case = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , ) _snake_case = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , ) if data_args.audio_column_name not in raw_datasets["train"].column_names: raise ValueError( f'''--audio_column_name {data_args.audio_column_name} not found in dataset \'{data_args.dataset_name}\'. ''' '''Make sure to set `--audio_column_name` to the correct audio column - one of ''' f'''{', '.join(raw_datasets['train'].column_names )}.''' ) if data_args.label_column_name not in raw_datasets["train"].column_names: raise ValueError( f'''--label_column_name {data_args.label_column_name} not found in dataset \'{data_args.dataset_name}\'. ''' '''Make sure to set `--label_column_name` to the correct text column - one of ''' f'''{', '.join(raw_datasets['train'].column_names )}.''' ) # Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over # transformer outputs in the classifier, but it doesn't always lead to better accuracy _snake_case = AutoFeatureExtractor.from_pretrained( model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # `datasets` takes care of automatically loading and resampling the audio, # so we just need to set the correct target sampling rate. 
_snake_case = raw_datasets.cast_column( data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) ) _snake_case = feature_extractor.model_input_names[0] def train_transforms(__lowerCamelCase : List[str] ): _snake_case = [] for audio in batch[data_args.audio_column_name]: _snake_case = random_subsample( audio['''array'''] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate ) subsampled_wavs.append(__lowerCamelCase ) _snake_case = feature_extractor(__lowerCamelCase , sampling_rate=feature_extractor.sampling_rate ) _snake_case = {model_input_name: inputs.get(__lowerCamelCase )} _snake_case = list(batch[data_args.label_column_name] ) return output_batch def val_transforms(__lowerCamelCase : Tuple ): _snake_case = [audio['''array'''] for audio in batch[data_args.audio_column_name]] _snake_case = feature_extractor(__lowerCamelCase , sampling_rate=feature_extractor.sampling_rate ) _snake_case = {model_input_name: inputs.get(__lowerCamelCase )} _snake_case = list(batch[data_args.label_column_name] ) return output_batch # Prepare label mappings. # We'll include these in the model's config to get human readable labels in the Inference API. _snake_case = raw_datasets['''train'''].features[data_args.label_column_name].names _snake_case , _snake_case = {}, {} for i, label in enumerate(__lowerCamelCase ): _snake_case = str(__lowerCamelCase ) _snake_case = label # Load the accuracy metric from the datasets package _snake_case = evaluate.load('''accuracy''' ) # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with # `predictions` and `label_ids` fields) and has to return a dictionary string to float. 
def compute_metrics(__lowerCamelCase : Dict ): _snake_case = np.argmax(eval_pred.predictions , axis=1 ) return metric.compute(predictions=__lowerCamelCase , references=eval_pred.label_ids ) _snake_case = AutoConfig.from_pretrained( model_args.config_name or model_args.model_name_or_path , num_labels=len(__lowerCamelCase ) , labelaid=__lowerCamelCase , idalabel=__lowerCamelCase , finetuning_task='''audio-classification''' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) _snake_case = AutoModelForAudioClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=__lowerCamelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , ) # freeze the convolutional waveform encoder if model_args.freeze_feature_encoder: model.freeze_feature_encoder() if training_args.do_train: if data_args.max_train_samples is not None: _snake_case = ( raw_datasets['''train'''].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) ) # Set the training transforms raw_datasets["train"].set_transform(__lowerCamelCase , output_all_columns=__lowerCamelCase ) if training_args.do_eval: if data_args.max_eval_samples is not None: _snake_case = ( raw_datasets['''eval'''].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms raw_datasets["eval"].set_transform(__lowerCamelCase , output_all_columns=__lowerCamelCase ) # Initialize our trainer _snake_case = Trainer( model=__lowerCamelCase , args=__lowerCamelCase , train_dataset=raw_datasets['''train'''] if training_args.do_train else None , eval_dataset=raw_datasets['''eval'''] if training_args.do_eval else None , compute_metrics=__lowerCamelCase , tokenizer=__lowerCamelCase , ) # 
Training if training_args.do_train: _snake_case = None if training_args.resume_from_checkpoint is not None: _snake_case = training_args.resume_from_checkpoint elif last_checkpoint is not None: _snake_case = last_checkpoint _snake_case = trainer.train(resume_from_checkpoint=__lowerCamelCase ) trainer.save_model() trainer.log_metrics('''train''' , train_result.metrics ) trainer.save_metrics('''train''' , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: _snake_case = trainer.evaluate() trainer.log_metrics('''eval''' , __lowerCamelCase ) trainer.save_metrics('''eval''' , __lowerCamelCase ) # Write model card and (optionally) push to hub _snake_case = { '''finetuned_from''': model_args.model_name_or_path, '''tasks''': '''audio-classification''', '''dataset''': data_args.dataset_name, '''tags''': ['''audio-classification'''], } if training_args.push_to_hub: trainer.push_to_hub(**__lowerCamelCase ) else: trainer.create_model_card(**__lowerCamelCase ) if __name__ == "__main__": main()
288
"""simple docstring""" def _UpperCAmelCase ( __lowerCamelCase : Tuple ) -> Any: stooge(__lowerCamelCase , 0 , len(__lowerCamelCase ) - 1 ) return arr def _UpperCAmelCase ( __lowerCamelCase : str , __lowerCamelCase : List[str] , __lowerCamelCase : str ) -> int: if i >= h: return # If first element is smaller than the last then swap them if arr[i] > arr[h]: _snake_case , _snake_case = arr[h], arr[i] # If there are more than 2 elements in the array if h - i + 1 > 2: _snake_case = (int)((h - i + 1) / 3 ) # Recursively sort first 2/3 elements stooge(__lowerCamelCase , __lowerCamelCase , (h - t) ) # Recursively sort last 2/3 elements stooge(__lowerCamelCase , i + t , (__lowerCamelCase) ) # Recursively sort first 2/3 elements stooge(__lowerCamelCase , __lowerCamelCase , (h - t) ) if __name__ == "__main__": UpperCAmelCase__ = input('Enter numbers separated by a comma:\n').strip() UpperCAmelCase__ = [int(item) for item in user_input.split(',')] print(stooge_sort(unsorted))
288
1
"""simple docstring"""
# CLIP-style image processor: resize -> center-crop -> rescale -> normalize
# -> channel-format conversion over a batch of images.
#
# NOTE(review): an automated renamer has rewritten this file. Every method
# parameter is named `_lowerCamelCase` (duplicate argument names are a
# SyntaxError in Python) and every assignment target is the throwaway
# `_snake_case`, while the bodies still read the original names
# (`size`, `crop_size`, `images`, ...). The code cannot run as written;
# restore the upstream parameter/attribute names before use.
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    convert_to_rgb,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    OPENAI_CLIP_MEAN,
    OPENAI_CLIP_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging

UpperCAmelCase__ = logging.get_logger(__name__)

if is_vision_available():
    import PIL


class lowerCAmelCase__ ( A_ ):
    # The single tensor name this processor emits.
    __a = ["""pixel_values"""]

    # Config: resize flag/size/resample, center-crop flag/size, rescale
    # flag/factor, normalize flag/mean/std, and RGB conversion flag.
    # Defaults match OpenAI CLIP (224px shortest edge, 224x224 crop,
    # 1/255 rescale, OPENAI_CLIP_MEAN/STD).
    def __init__( self : List[str] , _lowerCamelCase : bool = True , _lowerCamelCase : Dict[str, int] = None , _lowerCamelCase : PILImageResampling = PILImageResampling.BICUBIC , _lowerCamelCase : bool = True , _lowerCamelCase : Dict[str, int] = None , _lowerCamelCase : bool = True , _lowerCamelCase : Union[int, float] = 1 / 255 , _lowerCamelCase : bool = True , _lowerCamelCase : Optional[Union[float, List[float]]] = None , _lowerCamelCase : Optional[Union[float, List[float]]] = None , _lowerCamelCase : bool = True , **_lowerCamelCase : Optional[Any] , ):
        super().__init__(**_lowerCamelCase )
        _snake_case = size if size is not None else {'''shortest_edge''': 224}
        _snake_case = get_size_dict(_lowerCamelCase , default_to_square=_lowerCamelCase )
        _snake_case = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
        _snake_case = get_size_dict(_lowerCamelCase , default_to_square=_lowerCamelCase , param_name='''crop_size''' )
        _snake_case = do_resize
        _snake_case = size
        _snake_case = resample
        _snake_case = do_center_crop
        _snake_case = crop_size
        _snake_case = do_rescale
        _snake_case = rescale_factor
        _snake_case = do_normalize
        _snake_case = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        _snake_case = image_std if image_std is not None else OPENAI_CLIP_STD
        _snake_case = do_convert_rgb

    # Resize so the shortest edge matches size["shortest_edge"], preserving
    # aspect ratio (default_to_square=False path of get_resize_output_image_size).
    def lowercase ( self : Optional[int] , _lowerCamelCase : np.ndarray , _lowerCamelCase : Dict[str, int] , _lowerCamelCase : PILImageResampling = PILImageResampling.BICUBIC , _lowerCamelCase : Optional[Union[str, ChannelDimension]] = None , **_lowerCamelCase : List[str] , ):
        _snake_case = get_size_dict(_lowerCamelCase , default_to_square=_lowerCamelCase )
        if "shortest_edge" not in size:
            raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
        _snake_case = get_resize_output_image_size(_lowerCamelCase , size=size['''shortest_edge'''] , default_to_square=_lowerCamelCase )
        return resize(_lowerCamelCase , size=_lowerCamelCase , resample=_lowerCamelCase , data_format=_lowerCamelCase , **_lowerCamelCase )

    # Center-crop to exactly size["height"] x size["width"].
    def lowercase ( self : Dict , _lowerCamelCase : np.ndarray , _lowerCamelCase : Dict[str, int] , _lowerCamelCase : Optional[Union[str, ChannelDimension]] = None , **_lowerCamelCase : List[str] , ):
        _snake_case = get_size_dict(_lowerCamelCase )
        if "height" not in size or "width" not in size:
            raise ValueError(f'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' )
        return center_crop(_lowerCamelCase , size=(size['''height'''], size['''width''']) , data_format=_lowerCamelCase , **_lowerCamelCase )

    # Multiply pixel values by a scale factor (typically 1/255).
    def lowercase ( self : str , _lowerCamelCase : np.ndarray , _lowerCamelCase : Union[int, float] , _lowerCamelCase : Optional[Union[str, ChannelDimension]] = None , **_lowerCamelCase : Any , ):
        return rescale(_lowerCamelCase , scale=_lowerCamelCase , data_format=_lowerCamelCase , **_lowerCamelCase )

    # Per-channel (x - mean) / std normalization.
    def lowercase ( self : Optional[Any] , _lowerCamelCase : np.ndarray , _lowerCamelCase : Union[float, List[float]] , _lowerCamelCase : Union[float, List[float]] , _lowerCamelCase : Optional[Union[str, ChannelDimension]] = None , **_lowerCamelCase : Any , ):
        return normalize(_lowerCamelCase , mean=_lowerCamelCase , std=_lowerCamelCase , data_format=_lowerCamelCase , **_lowerCamelCase )

    # Full preprocessing pipeline; each step can be toggled per-call,
    # falling back to the instance-level defaults set in __init__.
    def lowercase ( self : Optional[Any] , _lowerCamelCase : ImageInput , _lowerCamelCase : bool = None , _lowerCamelCase : Dict[str, int] = None , _lowerCamelCase : PILImageResampling = None , _lowerCamelCase : bool = None , _lowerCamelCase : int = None , _lowerCamelCase : bool = None , _lowerCamelCase : float = None , _lowerCamelCase : bool = None , _lowerCamelCase : Optional[Union[float, List[float]]] = None , _lowerCamelCase : Optional[Union[float, List[float]]] = None , _lowerCamelCase : bool = None , _lowerCamelCase : Optional[Union[str, TensorType]] = None , _lowerCamelCase : Optional[ChannelDimension] = ChannelDimension.FIRST , **_lowerCamelCase : Optional[Any] , ):
        _snake_case = do_resize if do_resize is not None else self.do_resize
        _snake_case = size if size is not None else self.size
        _snake_case = get_size_dict(_lowerCamelCase , param_name='''size''' , default_to_square=_lowerCamelCase )
        _snake_case = resample if resample is not None else self.resample
        _snake_case = do_center_crop if do_center_crop is not None else self.do_center_crop
        _snake_case = crop_size if crop_size is not None else self.crop_size
        _snake_case = get_size_dict(_lowerCamelCase , param_name='''crop_size''' , default_to_square=_lowerCamelCase )
        _snake_case = do_rescale if do_rescale is not None else self.do_rescale
        _snake_case = rescale_factor if rescale_factor is not None else self.rescale_factor
        _snake_case = do_normalize if do_normalize is not None else self.do_normalize
        _snake_case = image_mean if image_mean is not None else self.image_mean
        _snake_case = image_std if image_std is not None else self.image_std
        _snake_case = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        _snake_case = make_list_of_images(_lowerCamelCase )
        if not valid_images(_lowerCamelCase ):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        # Validate that every enabled step has the arguments it needs.
        if do_resize and size is None:
            raise ValueError('''Size must be specified if do_resize is True.''' )
        if do_center_crop and crop_size is None:
            raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            _snake_case = [convert_to_rgb(_lowerCamelCase ) for image in images]
        # All transformations expect numpy arrays.
        _snake_case = [to_numpy_array(_lowerCamelCase ) for image in images]
        if do_resize:
            _snake_case = [self.resize(image=_lowerCamelCase , size=_lowerCamelCase , resample=_lowerCamelCase ) for image in images]
        if do_center_crop:
            _snake_case = [self.center_crop(image=_lowerCamelCase , size=_lowerCamelCase ) for image in images]
        if do_rescale:
            _snake_case = [self.rescale(image=_lowerCamelCase , scale=_lowerCamelCase ) for image in images]
        if do_normalize:
            _snake_case = [self.normalize(image=_lowerCamelCase , mean=_lowerCamelCase , std=_lowerCamelCase ) for image in images]
        _snake_case = [to_channel_dimension_format(_lowerCamelCase , _lowerCamelCase ) for image in images]
        _snake_case = {'''pixel_values''': images}
        return BatchFeature(data=_lowerCamelCase , tensor_type=_lowerCamelCase )
288
"""Logistic Regression from scratch (batch gradient descent on iris)."""

# In[62]:

# In[63]:

# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets


def sigmoid_function(z):
    """Logistic sigmoid: 1 / (1 + e^-z). Works element-wise on arrays."""
    return 1 / (1 + np.exp(-z))


def cost_function(h, y):
    """Mean binary cross-entropy between predictions ``h`` and labels ``y``."""
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()


def log_likelihood(x, y, weights):
    """Log-likelihood of labels ``y`` under a linear model with ``weights``."""
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))


def logistic_reg(alpha, x, y, max_iterations=70000):
    """Fit logistic-regression weights by batch gradient descent.

    alpha: learning rate; x: (n_samples, n_features); y: 0/1 labels.
    Returns the fitted weight vector theta.
    """
    theta = np.zeros(x.shape[1])
    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f"loss: {j} \t")  # printing the loss after every 100 iterations
    return theta


# In[68]:

if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1
    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70000)
    print("theta: ", theta)  # printing the theta i.e our weights vector

    def predict_prob(x):
        """Predicted probability from the fitted logistic regression."""
        return sigmoid_function(np.dot(x, theta))

    # Decision-boundary plot over the first two iris features.
    plt.figure(figsize=(10, 6))
    plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0")
    plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1")
    (xa_min, xa_max) = (x[:, 0].min(), x[:, 0].max())
    (xb_min, xb_max) = (x[:, 1].min(), x[:, 1].max())
    (xxa, xxb) = np.meshgrid(np.linspace(xa_min, xa_max), np.linspace(xb_min, xb_max))
    grid = np.c_[xxa.ravel(), xxb.ravel()]
    probs = predict_prob(grid).reshape(xxa.shape)
    plt.contour(xxa, xxb, probs, [0.5], linewidths=1, colors="black")
    plt.legend()
    plt.show()
288
1
"""simple docstring"""
# Vision-encoder-decoder configuration plus ONNX export configs.
#
# NOTE(review): an automated renamer has damaged this file: all four classes
# share the name `lowerCAmelCase__` (later definitions shadow earlier ones),
# the class attributes are all `__a`, every method is named `lowercase`
# (within a class, later defs shadow earlier ones), assignments are discarded
# into `_snake_case` while bodies read the original names (`kwargs`,
# `encoder_config`, `output`, `common_inputs`, ...), and the last class
# references `VisionEncoderDecoderEncoderOnnxConfig` /
# `VisionEncoderDecoderDecoderOnnxConfig`, which are not defined anywhere in
# view. Restore the upstream names before use.
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig

if TYPE_CHECKING:
    from ... import PreTrainedTokenizerBase, TensorType

UpperCAmelCase__ = logging.get_logger(__name__)


class lowerCAmelCase__ ( A_ ):
    # Composite config: wraps an `encoder` and a `decoder` sub-config.
    __a = """vision-encoder-decoder"""
    __a = True

    def __init__( self : Dict , **_lowerCamelCase : Dict ):
        super().__init__(**_lowerCamelCase )
        # Both sub-configs are mandatory keyword arguments.
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                f'''A configuraton of type {self.model_type} cannot be instantiated because '''
                f'''not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}''' )
        _snake_case = kwargs.pop('''encoder''' )
        _snake_case = encoder_config.pop('''model_type''' )
        _snake_case = kwargs.pop('''decoder''' )
        _snake_case = decoder_config.pop('''model_type''' )
        # Rebuild concrete config objects from the dict + model_type pairs.
        _snake_case = AutoConfig.for_model(_lowerCamelCase , **_lowerCamelCase )
        _snake_case = AutoConfig.for_model(_lowerCamelCase , **_lowerCamelCase )
        _snake_case = True

    # Alternate constructor from two already-built configs; forces the
    # decoder into cross-attention decoder mode.
    @classmethod
    def lowercase ( cls : Tuple , _lowerCamelCase : PretrainedConfig , _lowerCamelCase : PretrainedConfig , **_lowerCamelCase : Dict ):
        logger.info('''Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''' )
        _snake_case = True
        _snake_case = True
        return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **_lowerCamelCase )

    # Serialize to a plain dict, expanding the nested sub-configs.
    def lowercase ( self : Dict ):
        _snake_case = copy.deepcopy(self.__dict__ )
        _snake_case = self.encoder.to_dict()
        _snake_case = self.decoder.to_dict()
        _snake_case = self.__class__.model_type
        return output


class lowerCAmelCase__ ( A_ ):
    # ONNX export config for the vision encoder side.
    __a = version.parse("""1.11""" )

    @property
    def lowercase ( self : str ):
        # Dynamic-axis spec for the image input.
        return OrderedDict(
            [
                ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
            ]
        )

    @property
    def lowercase ( self : List[Any] ):
        # Validation tolerance for the exported model.
        return 1e-4

    @property
    def lowercase ( self : Optional[Any] ):
        return OrderedDict({'''last_hidden_state''': {0: '''batch''', 1: '''encoder_sequence'''}} )


class lowerCAmelCase__ ( A_ ):
    # ONNX export config for the text decoder side.
    @property
    def lowercase ( self : Dict ):
        _snake_case = OrderedDict()
        _snake_case = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
        _snake_case = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
        _snake_case = {0: '''batch''', 1: '''encoder_sequence'''}
        return common_inputs

    # Build dummy decoder inputs; replaces `input_ids` with
    # `decoder_input_ids` and fabricates zero encoder hidden states.
    def lowercase ( self : str , _lowerCamelCase : "PreTrainedTokenizerBase" , _lowerCamelCase : int = -1 , _lowerCamelCase : int = -1 , _lowerCamelCase : bool = False , _lowerCamelCase : Optional["TensorType"] = None , ):
        import torch

        _snake_case = OrderedDict()
        _snake_case = super().generate_dummy_inputs(
            _lowerCamelCase , batch_size=_lowerCamelCase , seq_length=_lowerCamelCase , is_pair=_lowerCamelCase , framework=_lowerCamelCase )
        _snake_case , _snake_case = dummy_input['''input_ids'''].shape
        _snake_case = (batch, encoder_sequence, self._config.encoder_hidden_size)
        _snake_case = dummy_input.pop('''input_ids''' )
        _snake_case = dummy_input.pop('''attention_mask''' )
        _snake_case = torch.zeros(_lowerCamelCase )
        return common_inputs


class lowerCAmelCase__ ( A_ ):
    # Facade that dispatches to the encoder/decoder ONNX configs above.
    @property
    def lowercase ( self : Optional[int] ):
        pass

    def lowercase ( self : List[Any] , _lowerCamelCase : PretrainedConfig ):
        return VisionEncoderDecoderEncoderOnnxConfig(_lowerCamelCase )

    def lowercase ( self : Any , _lowerCamelCase : PretrainedConfig , _lowerCamelCase : PretrainedConfig , _lowerCamelCase : str = "default" ):
        # Expose the encoder width to the decoder export config.
        _snake_case = encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(_lowerCamelCase , _lowerCamelCase )
288
"""simple docstring"""
# SentencePiece-based tokenizer (RemBERT-style checkpoint paths below).
#
# NOTE(review): an automated renamer has damaged this file: the four module
# constants are all `UpperCAmelCase__` (each shadows the previous), the class
# attributes are all `__a` and reference `VOCAB_FILES_NAMES` /
# `PRETRAINED_VOCAB_FILES_MAP` / `PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES`
# which no longer exist under those names, most methods are named `lowercase`
# (later defs shadow earlier ones, so only the last survives), duplicate
# `_lowerCamelCase` parameters are a SyntaxError, and `_snake_case`
# assignments discard values the bodies later read (`vocab`, `state`,
# `pieces`, `sep`, `cls`, `out_vocab_file`, ...). Restore upstream names
# before use.
import os
from shutil import copyfile
from typing import List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging

UpperCAmelCase__ = logging.get_logger(__name__)

UpperCAmelCase__ = {'vocab_file': 'sentencepiece.model'}

UpperCAmelCase__ = {
    'vocab_file': {
        'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/sentencepiece.model',
    },
}

UpperCAmelCase__ = {
    'google/rembert': 256,
}


class lowerCAmelCase__ ( A_ ):
    __a = VOCAB_FILES_NAMES
    __a = PRETRAINED_VOCAB_FILES_MAP
    __a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    # Loads the SentencePiece model and forwards special-token/options to
    # the base tokenizer.
    def __init__( self : Union[str, Any] , _lowerCamelCase : Any , _lowerCamelCase : Union[str, Any]=False , _lowerCamelCase : Any=True , _lowerCamelCase : Optional[Any]=True , _lowerCamelCase : int="[CLS]" , _lowerCamelCase : Optional[int]="[SEP]" , _lowerCamelCase : Optional[int]="[UNK]" , _lowerCamelCase : Optional[Any]="[SEP]" , _lowerCamelCase : str="[PAD]" , _lowerCamelCase : List[Any]="[CLS]" , _lowerCamelCase : Any="[MASK]" , **_lowerCamelCase : Optional[int] , ):
        super().__init__(
            do_lower_case=_lowerCamelCase , remove_space=_lowerCamelCase , keep_accents=_lowerCamelCase , bos_token=_lowerCamelCase , eos_token=_lowerCamelCase , unk_token=_lowerCamelCase , sep_token=_lowerCamelCase , pad_token=_lowerCamelCase , cls_token=_lowerCamelCase , mask_token=_lowerCamelCase , **_lowerCamelCase , )
        _snake_case = do_lower_case
        _snake_case = remove_space
        _snake_case = keep_accents
        _snake_case = vocab_file
        _snake_case = spm.SentencePieceProcessor()
        self.sp_model.Load(_lowerCamelCase )

    # Vocabulary size, taken from the SentencePiece model.
    @property
    def lowercase ( self : int ):
        return len(self.sp_model )

    # token -> id map including added tokens.
    def lowercase ( self : Any ):
        _snake_case = {self.convert_ids_to_tokens(_lowerCamelCase ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    # Drop the unpicklable SentencePiece processor for pickling ...
    def __getstate__( self : List[str] ):
        _snake_case = self.__dict__.copy()
        _snake_case = None
        return state

    # ... and reload it from the vocab file on unpickling.
    def __setstate__( self : List[str] , _lowerCamelCase : Tuple ):
        _snake_case = d
        _snake_case = spm.SentencePieceProcessor()
        self.sp_model.Load(self.vocab_file )

    # Tokenize raw text into SentencePiece pieces.
    def lowercase ( self : str , _lowerCamelCase : List[str] , _lowerCamelCase : Tuple=False ):
        _snake_case = self.sp_model.EncodeAsPieces(_lowerCamelCase )
        return pieces

    def lowercase ( self : str , _lowerCamelCase : str ):
        return self.sp_model.PieceToId(_lowerCamelCase )

    def lowercase ( self : List[str] , _lowerCamelCase : int ):
        return self.sp_model.IdToPiece(_lowerCamelCase )

    # Join pieces back into a string.
    def lowercase ( self : Union[str, Any] , _lowerCamelCase : Any ):
        _snake_case = self.sp_model.decode_pieces(_lowerCamelCase )
        return out_string

    # [CLS] A [SEP] or [CLS] A [SEP] B [SEP].
    def lowercase ( self : Optional[Any] , _lowerCamelCase : List[int] , _lowerCamelCase : Optional[List[int]] = None ):
        _snake_case = [self.sep_token_id]
        _snake_case = [self.cls_token_id]
        if token_ids_a is None:
            return cls + token_ids_a + sep
        return cls + token_ids_a + sep + token_ids_a + sep

    # 1 for special tokens, 0 for sequence tokens.
    def lowercase ( self : Tuple , _lowerCamelCase : List[int] , _lowerCamelCase : Optional[List[int]] = None , _lowerCamelCase : bool = False ):
        if already_has_special_tokens:
            if token_ids_a is not None:
                raise ValueError(
                    '''You should not supply a second sequence if the provided sequence of '''
                    '''ids is already formatted with special tokens for the model.''' )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
        if token_ids_a is not None:
            return [1] + ([0] * len(_lowerCamelCase )) + [1] + ([0] * len(_lowerCamelCase )) + [1]
        return [1] + ([0] * len(_lowerCamelCase )) + [1]

    # Segment ids: 0 for the first sequence (incl. specials), 1 for the second.
    def lowercase ( self : Optional[int] , _lowerCamelCase : List[int] , _lowerCamelCase : Optional[List[int]] = None ):
        _snake_case = [self.sep_token_id]
        _snake_case = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]

    # Copy the SentencePiece model file into `save_directory`.
    def lowercase ( self : List[str] , _lowerCamelCase : str , _lowerCamelCase : Optional[str] = None ):
        if not os.path.isdir(_lowerCamelCase ):
            logger.error('''Vocabulary path ({}) should be a directory'''.format(_lowerCamelCase ) )
            return
        _snake_case = os.path.join(
            _lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCamelCase ):
            copyfile(self.vocab_file , _lowerCamelCase )
        return (out_vocab_file,)
288
1
"""simple docstring"""
# Speech feature extractor producing waveform `input_values` (and, for
# targets, log-mel spectrograms) with optional padding and normalization.
#
# NOTE(review): an automated renamer has damaged this file: duplicate
# `_lowerCamelCase` parameters are a SyntaxError, `_snake_case` assignments
# discard values the bodies later read (`inputs`, `padded_inputs`,
# `normed_input_values`, ...), most methods are named `lowercase` (later
# defs shadow earlier ones within the class), and numeric-suffixed names
# were mangled (`np.intaa`, `np.floataa` do not exist — presumably
# `np.int32`/`np.float32`). Restore upstream names before use.
import warnings
from typing import Any, Dict, List, Optional, Union

import numpy as np

from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging

UpperCAmelCase__ = logging.get_logger(__name__)


class lowerCAmelCase__ ( A_ ):
    __a = ["""input_values""", """attention_mask"""]

    # Derives STFT geometry (sample_size/stride, n_fft, n_freqs), the
    # analysis window, and slaney-normalized mel filters from the
    # millisecond-based win_length/hop_length arguments.
    def __init__( self : str , _lowerCamelCase : int = 1 , _lowerCamelCase : int = 16000 , _lowerCamelCase : float = 0.0 , _lowerCamelCase : bool = False , _lowerCamelCase : int = 80 , _lowerCamelCase : int = 16 , _lowerCamelCase : int = 64 , _lowerCamelCase : str = "hann_window" , _lowerCamelCase : float = 1.0 , _lowerCamelCase : float = 80 , _lowerCamelCase : float = 7600 , _lowerCamelCase : float = 1e-10 , _lowerCamelCase : int = 2 , _lowerCamelCase : bool = True , **_lowerCamelCase : Any , ):
        super().__init__(feature_size=_lowerCamelCase , sampling_rate=_lowerCamelCase , padding_value=_lowerCamelCase , **_lowerCamelCase )
        _snake_case = do_normalize
        _snake_case = return_attention_mask
        _snake_case = num_mel_bins
        _snake_case = hop_length
        _snake_case = win_length
        _snake_case = win_function
        _snake_case = frame_signal_scale
        _snake_case = fmin
        _snake_case = fmax
        _snake_case = mel_floor
        _snake_case = reduction_factor
        # win_length/hop_length are in milliseconds; convert to samples.
        _snake_case = win_length * sampling_rate // 1000
        _snake_case = hop_length * sampling_rate // 1000
        _snake_case = optimal_fft_length(self.sample_size )
        _snake_case = (self.n_fft // 2) + 1
        _snake_case = window_function(window_length=self.sample_size , name=self.win_function , periodic=_lowerCamelCase )
        _snake_case = mel_filter_bank(
            num_frequency_bins=self.n_freqs , num_mel_filters=self.num_mel_bins , min_frequency=self.fmin , max_frequency=self.fmax , sampling_rate=self.sampling_rate , norm='''slaney''' , mel_scale='''slaney''' , )
        # Both arguments are deprecated; warn when a non-default is passed.
        # NOTE(review): the warning category argument was renamed away —
        # presumably FutureWarning upstream.
        if frame_signal_scale != 1.0:
            warnings.warn(
                '''The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers''' , _lowerCamelCase , )
        if reduction_factor != 2.0:
            warnings.warn(
                '''The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers''' , _lowerCamelCase , )

    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def lowercase ( _lowerCamelCase : List[np.ndarray] , _lowerCamelCase : List[np.ndarray] , _lowerCamelCase : float = 0.0 ):
        # Per-utterance zero-mean/unit-variance normalization; statistics
        # are computed only over the unpadded prefix when a mask is given.
        if attention_mask is not None:
            _snake_case = np.array(_lowerCamelCase , np.intaa )
            _snake_case = []
            for vector, length in zip(_lowerCamelCase , attention_mask.sum(-1 ) ):
                _snake_case = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7 )
                if length < normed_slice.shape[0]:
                    _snake_case = padding_value
                normed_input_values.append(_lowerCamelCase )
        else:
            _snake_case = [(x - x.mean()) / np.sqrt(x.var() + 1e-7 ) for x in input_values]
        return normed_input_values

    # One waveform -> (frames, num_mel_bins) log10-mel spectrogram.
    def lowercase ( self : Union[str, Any] , _lowerCamelCase : np.ndarray , ):
        _snake_case = spectrogram(
            _lowerCamelCase , window=self.window , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , mel_filters=self.mel_filters , mel_floor=self.mel_floor , log_mel='''log10''' , )
        return log_mel_spec.T

    # Featurize `audio` (model inputs) and/or `audio_target` (label
    # spectrograms); when both are given, the target tensors are attached
    # to the input batch as `labels` / `decoder_attention_mask`.
    def __call__( self : List[str] , _lowerCamelCase : Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None , _lowerCamelCase : Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None , _lowerCamelCase : Union[bool, str, PaddingStrategy] = False , _lowerCamelCase : Optional[int] = None , _lowerCamelCase : bool = False , _lowerCamelCase : Optional[int] = None , _lowerCamelCase : Optional[bool] = None , _lowerCamelCase : Optional[Union[str, TensorType]] = None , _lowerCamelCase : Optional[int] = None , **_lowerCamelCase : Tuple , ):
        if audio is None and audio_target is None:
            raise ValueError('''You must provide either `audio` or `audio_target` values.''' )
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
                    f''' {self.sampling_rate}. Please make sure that the provided audio input was sampled with'''
                    f''' {self.sampling_rate} and not {sampling_rate}.''' )
        else:
            logger.warning(
                '''It is strongly recommended to pass the ``sampling_rate`` argument to this function. '''
                '''Failing to do so can result in silent errors that might be hard to debug.''' )
        if audio is not None:
            _snake_case = self._process_audio(
                _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , **_lowerCamelCase , )
        else:
            _snake_case = None
        if audio_target is not None:
            _snake_case = self._process_audio(
                _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , **_lowerCamelCase , )
            if inputs is None:
                return inputs_target
            else:
                _snake_case = inputs_target['''input_values''']
                _snake_case = inputs_target.get('''attention_mask''' )
                if decoder_attention_mask is not None:
                    _snake_case = decoder_attention_mask
        return inputs

    # Shared worker: batches, (optionally) extracts mel features for
    # targets, pads, dtype-converts, and normalizes.
    def lowercase ( self : Union[str, Any] , _lowerCamelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , _lowerCamelCase : bool = False , _lowerCamelCase : Union[bool, str, PaddingStrategy] = False , _lowerCamelCase : Optional[int] = None , _lowerCamelCase : bool = False , _lowerCamelCase : Optional[int] = None , _lowerCamelCase : Optional[bool] = None , _lowerCamelCase : Optional[Union[str, TensorType]] = None , **_lowerCamelCase : Optional[int] , ):
        _snake_case = isinstance(_lowerCamelCase , np.ndarray ) and len(speech.shape ) > 1
        if is_batched_numpy and len(speech.shape ) > 2:
            raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' )
        _snake_case = is_batched_numpy or (
            isinstance(_lowerCamelCase , (list, tuple) ) and (isinstance(speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            _snake_case = [np.asarray(_lowerCamelCase , dtype=np.floataa ) for speech in speech]
        elif not is_batched and not isinstance(_lowerCamelCase , np.ndarray ):
            _snake_case = np.asarray(_lowerCamelCase , dtype=np.floataa )
        elif isinstance(_lowerCamelCase , np.ndarray ) and speech.dtype is np.dtype(np.floataa ):
            _snake_case = speech.astype(np.floataa )
        # always return batch
        if not is_batched:
            _snake_case = [speech]
        # needed to make pad() work on spectrogram inputs
        _snake_case = self.feature_size
        # convert into correct format for padding
        if is_target:
            _snake_case = [self._extract_mel_features(_lowerCamelCase ) for waveform in speech]
            _snake_case = BatchFeature({'''input_values''': features} )
            _snake_case = self.num_mel_bins
        else:
            _snake_case = BatchFeature({'''input_values''': speech} )
        _snake_case = self.pad(
            _lowerCamelCase , padding=_lowerCamelCase , max_length=_lowerCamelCase , truncation=_lowerCamelCase , pad_to_multiple_of=_lowerCamelCase , return_attention_mask=_lowerCamelCase , **_lowerCamelCase , )
        _snake_case = feature_size_hack
        # convert input values to correct format
        _snake_case = padded_inputs['''input_values''']
        if not isinstance(input_values[0] , np.ndarray ):
            _snake_case = [np.asarray(_lowerCamelCase , dtype=np.floataa ) for array in input_values]
        elif (
            not isinstance(_lowerCamelCase , np.ndarray )
            and isinstance(input_values[0] , np.ndarray )
            and input_values[0].dtype is np.dtype(np.floataa )
        ):
            _snake_case = [array.astype(np.floataa ) for array in input_values]
        elif isinstance(_lowerCamelCase , np.ndarray ) and input_values.dtype is np.dtype(np.floataa ):
            _snake_case = input_values.astype(np.floataa )
        # convert attention_mask to correct format
        _snake_case = padded_inputs.get('''attention_mask''' )
        if attention_mask is not None:
            _snake_case = [np.asarray(_lowerCamelCase , dtype=np.intaa ) for array in attention_mask]
        # zero-mean and unit-variance normalization
        if not is_target and self.do_normalize:
            _snake_case = (
                attention_mask
                if self._get_padding_strategies(_lowerCamelCase , max_length=_lowerCamelCase ) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            _snake_case = self.zero_mean_unit_var_norm(
                padded_inputs['''input_values'''] , attention_mask=_lowerCamelCase , padding_value=self.padding_value )
        if return_tensors is not None:
            _snake_case = padded_inputs.convert_to_tensors(_lowerCamelCase )
        return padded_inputs

    # Serialization helper: drop attributes derived in __init__.
    def lowercase ( self : Optional[int] ):
        _snake_case = super().to_dict()
        # Don't serialize these as they are derived from the other properties.
        _snake_case = ['''window''', '''mel_filters''', '''sample_size''', '''sample_stride''', '''n_fft''', '''n_freqs''']
        for name in names:
            if name in output:
                del output[name]
        return output
288
"""simple docstring""" from math import pow def _UpperCAmelCase ( __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : int , ) -> tuple[int, int]: if current_sum == needed_sum: # If the sum of the powers is equal to needed_sum, then we have a solution. solutions_count += 1 return current_sum, solutions_count _snake_case = int(pow(__lowerCamelCase , __lowerCamelCase ) ) if current_sum + i_to_n <= needed_sum: # If the sum of the powers is less than needed_sum, then continue adding powers. current_sum += i_to_n _snake_case , _snake_case = backtrack( __lowerCamelCase , __lowerCamelCase , current_number + 1 , __lowerCamelCase , __lowerCamelCase ) current_sum -= i_to_n if i_to_n < needed_sum: # If the power of i is less than needed_sum, then try with the next power. _snake_case , _snake_case = backtrack( __lowerCamelCase , __lowerCamelCase , current_number + 1 , __lowerCamelCase , __lowerCamelCase ) return current_sum, solutions_count def _UpperCAmelCase ( __lowerCamelCase : int , __lowerCamelCase : int ) -> int: if not (1 <= needed_sum <= 10_00 and 2 <= power <= 10): raise ValueError( '''Invalid input\n''' '''needed_sum must be between 1 and 1000, power between 2 and 10.''' ) return backtrack(__lowerCamelCase , __lowerCamelCase , 1 , 0 , 0 )[1] # Return the solutions_count if __name__ == "__main__": import doctest doctest.testmod()
288
1
"""simple docstring""" import logging import os import sys from dataclasses import dataclass, field from typing import Optional import numpy as np import torch from datasets import load_dataset from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor import transformers from transformers import ( CONFIG_MAPPING, IMAGE_PROCESSOR_MAPPING, MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING, AutoConfig, AutoImageProcessor, AutoModelForMaskedImageModeling, HfArgumentParser, Trainer, TrainingArguments, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version UpperCAmelCase__ = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version('4.31.0') require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt') UpperCAmelCase__ = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys()) UpperCAmelCase__ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class lowerCAmelCase__ : __a = field( default="""cifar10""" , metadata={"""help""": """Name of a dataset from the datasets package"""} ) __a = field( default=A_ , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} ) __a = field( default=A_ , metadata={"""help""": """The column name of the images in the files. 
If not set, will try to use 'image' or 'img'."""} , ) __a = field(default=A_ , metadata={"""help""": """A folder containing the training data."""} ) __a = field(default=A_ , metadata={"""help""": """A folder containing the validation data."""} ) __a = field( default=0.15 , metadata={"""help""": """Percent to split off of train for validation."""} ) __a = field(default=32 , metadata={"""help""": """The size of the square patches to use for masking."""} ) __a = field( default=0.6 , metadata={"""help""": """Percentage of patches to mask."""} , ) __a = field( default=A_ , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of training examples to this """ """value if set.""" ) } , ) __a = field( default=A_ , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of evaluation examples to this """ """value if set.""" ) } , ) def lowercase ( self : Union[str, Any] ): _snake_case = {} if self.train_dir is not None: _snake_case = self.train_dir if self.validation_dir is not None: _snake_case = self.validation_dir _snake_case = data_files if data_files else None @dataclass class lowerCAmelCase__ : __a = field( default=A_ , metadata={ """help""": ( """The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a """ """checkpoint identifier on the hub. """ """Don't set if you want to train a model from scratch.""" ) } , ) __a = field( default=A_ , metadata={"""help""": """If training from scratch, pass a model type from the list: """ + """, """.join(A_ )} , ) __a = field( default=A_ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} ) __a = field( default=A_ , metadata={ """help""": ( """Override some existing default config settings when a model is trained from scratch. 
Example: """ """n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index""" ) } , ) __a = field( default=A_ , metadata={"""help""": """Where do you want to store (cache) the pretrained models/datasets downloaded from the hub"""} , ) __a = field( default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , ) __a = field(default=A_ , metadata={"""help""": """Name or path of preprocessor config."""} ) __a = field( default=A_ , metadata={ """help""": ( """Will use the token generated when running `huggingface-cli login` (necessary to use this script """ """with private models).""" ) } , ) __a = field( default=A_ , metadata={ """help""": ( """The size (resolution) of each image. If not specified, will use `image_size` of the configuration.""" ) } , ) __a = field( default=A_ , metadata={ """help""": ( """The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration.""" ) } , ) __a = field( default=A_ , metadata={"""help""": """Stride to use for the encoder."""} , ) class lowerCAmelCase__ : def __init__( self : Any , _lowerCamelCase : Tuple=192 , _lowerCamelCase : Optional[Any]=32 , _lowerCamelCase : Union[str, Any]=4 , _lowerCamelCase : Any=0.6 ): _snake_case = input_size _snake_case = mask_patch_size _snake_case = model_patch_size _snake_case = mask_ratio if self.input_size % self.mask_patch_size != 0: raise ValueError('''Input size must be divisible by mask patch size''' ) if self.mask_patch_size % self.model_patch_size != 0: raise ValueError('''Mask patch size must be divisible by model patch size''' ) _snake_case = self.input_size // self.mask_patch_size _snake_case = self.mask_patch_size // self.model_patch_size _snake_case = self.rand_size**2 _snake_case = int(np.ceil(self.token_count * self.mask_ratio ) ) def __call__( self : int ): _snake_case = np.random.permutation(self.token_count )[: self.mask_count] _snake_case = 
np.zeros(self.token_count , dtype=_lowerCamelCase ) _snake_case = 1 _snake_case = mask.reshape((self.rand_size, self.rand_size) ) _snake_case = mask.repeat(self.scale , axis=0 ).repeat(self.scale , axis=1 ) return torch.tensor(mask.flatten() ) def _UpperCAmelCase ( __lowerCamelCase : Any ) -> List[str]: _snake_case = torch.stack([example['''pixel_values'''] for example in examples] ) _snake_case = torch.stack([example['''mask'''] for example in examples] ) return {"pixel_values": pixel_values, "bool_masked_pos": mask} def _UpperCAmelCase ( ) -> str: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. _snake_case = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. _snake_case , _snake_case , _snake_case = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: _snake_case , _snake_case , _snake_case = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry('''run_mim''' , __lowerCamelCase , __lowerCamelCase ) # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. 
transformers.utils.logging.set_verbosity_info() _snake_case = training_args.get_process_log_level() logger.setLevel(__lowerCamelCase ) transformers.utils.logging.set_verbosity(__lowerCamelCase ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}''' + f'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' ) logger.info(f'''Training/evaluation parameters {training_args}''' ) # Detecting last checkpoint. _snake_case = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: _snake_case = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( f'''Output directory ({training_args.output_dir}) already exists and is not empty. ''' '''Use --overwrite_output_dir to overcome.''' ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ''' '''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' ) # Initialize our dataset. _snake_case = load_dataset( data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) # If we don't have a validation split, split off a percentage of train as validation. 
_snake_case = None if '''validation''' in ds.keys() else data_args.train_val_split if isinstance(data_args.train_val_split , __lowerCamelCase ) and data_args.train_val_split > 0.0: _snake_case = ds['''train'''].train_test_split(data_args.train_val_split ) _snake_case = split['''train'''] _snake_case = split['''test'''] # Create config # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. _snake_case = { '''cache_dir''': model_args.cache_dir, '''revision''': model_args.model_revision, '''use_auth_token''': True if model_args.use_auth_token else None, } if model_args.config_name_or_path: _snake_case = AutoConfig.from_pretrained(model_args.config_name_or_path , **__lowerCamelCase ) elif model_args.model_name_or_path: _snake_case = AutoConfig.from_pretrained(model_args.model_name_or_path , **__lowerCamelCase ) else: _snake_case = CONFIG_MAPPING[model_args.model_type]() logger.warning('''You are instantiating a new config instance from scratch.''' ) if model_args.config_overrides is not None: logger.info(f'''Overriding config: {model_args.config_overrides}''' ) config.update_from_string(model_args.config_overrides ) logger.info(f'''New config: {config}''' ) # make sure the decoder_type is "simmim" (only relevant for BEiT) if hasattr(__lowerCamelCase , '''decoder_type''' ): _snake_case = '''simmim''' # adapt config _snake_case = model_args.image_size if model_args.image_size is not None else config.image_size _snake_case = model_args.patch_size if model_args.patch_size is not None else config.patch_size _snake_case = ( model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride ) config.update( { '''image_size''': model_args.image_size, '''patch_size''': model_args.patch_size, '''encoder_stride''': model_args.encoder_stride, } ) # create image processor if model_args.image_processor_name: _snake_case = 
AutoImageProcessor.from_pretrained(model_args.image_processor_name , **__lowerCamelCase ) elif model_args.model_name_or_path: _snake_case = AutoImageProcessor.from_pretrained(model_args.model_name_or_path , **__lowerCamelCase ) else: _snake_case = { conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items() } _snake_case = IMAGE_PROCESSOR_TYPES[model_args.model_type]() # create model if model_args.model_name_or_path: _snake_case = AutoModelForMaskedImageModeling.from_pretrained( model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=__lowerCamelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) else: logger.info('''Training new model from scratch''' ) _snake_case = AutoModelForMaskedImageModeling.from_config(__lowerCamelCase ) if training_args.do_train: _snake_case = ds['''train'''].column_names else: _snake_case = ds['''validation'''].column_names if data_args.image_column_name is not None: _snake_case = data_args.image_column_name elif "image" in column_names: _snake_case = '''image''' elif "img" in column_names: _snake_case = '''img''' else: _snake_case = column_names[0] # transformations as done in original SimMIM paper # source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py _snake_case = Compose( [ Lambda(lambda __lowerCamelCase : img.convert('''RGB''' ) if img.mode != "RGB" else img ), RandomResizedCrop(model_args.image_size , scale=(0.67, 1.0) , ratio=(3.0 / 4.0, 4.0 / 3.0) ), RandomHorizontalFlip(), ToTensor(), Normalize(mean=image_processor.image_mean , std=image_processor.image_std ), ] ) # create mask generator _snake_case = MaskGenerator( input_size=model_args.image_size , mask_patch_size=data_args.mask_patch_size , model_patch_size=model_args.patch_size , mask_ratio=data_args.mask_ratio , ) def preprocess_images(__lowerCamelCase : Union[str, Any] ): 
_snake_case = [transforms(__lowerCamelCase ) for image in examples[image_column_name]] _snake_case = [mask_generator() for i in range(len(examples[image_column_name] ) )] return examples if training_args.do_train: if "train" not in ds: raise ValueError('''--do_train requires a train dataset''' ) if data_args.max_train_samples is not None: _snake_case = ds['''train'''].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) # Set the training transforms ds["train"].set_transform(__lowerCamelCase ) if training_args.do_eval: if "validation" not in ds: raise ValueError('''--do_eval requires a validation dataset''' ) if data_args.max_eval_samples is not None: _snake_case = ( ds['''validation'''].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms ds["validation"].set_transform(__lowerCamelCase ) # Initialize our trainer _snake_case = Trainer( model=__lowerCamelCase , args=__lowerCamelCase , train_dataset=ds['''train'''] if training_args.do_train else None , eval_dataset=ds['''validation'''] if training_args.do_eval else None , tokenizer=__lowerCamelCase , data_collator=__lowerCamelCase , ) # Training if training_args.do_train: _snake_case = None if training_args.resume_from_checkpoint is not None: _snake_case = training_args.resume_from_checkpoint elif last_checkpoint is not None: _snake_case = last_checkpoint _snake_case = trainer.train(resume_from_checkpoint=__lowerCamelCase ) trainer.save_model() trainer.log_metrics('''train''' , train_result.metrics ) trainer.save_metrics('''train''' , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: _snake_case = trainer.evaluate() trainer.log_metrics('''eval''' , __lowerCamelCase ) trainer.save_metrics('''eval''' , __lowerCamelCase ) # Write model card and (optionally) push to hub _snake_case = { '''finetuned_from''': model_args.model_name_or_path, '''tasks''': '''masked-image-modeling''', '''dataset''': 
data_args.dataset_name, '''tags''': ['''masked-image-modeling'''], } if training_args.push_to_hub: trainer.push_to_hub(**__lowerCamelCase ) else: trainer.create_model_card(**__lowerCamelCase ) if __name__ == "__main__": main()
288
"""simple docstring""" import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import CLIPImageProcessor, CLIPProcessor @require_vision class lowerCAmelCase__ ( unittest.TestCase ): def lowercase ( self : Any ): _snake_case = tempfile.mkdtemp() # fmt: off _snake_case = ['''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>'''] # fmt: on _snake_case = dict(zip(_lowerCamelCase , range(len(_lowerCamelCase ) ) ) ) _snake_case = ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''', ''''''] _snake_case = {'''unk_token''': '''<unk>'''} _snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) _snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(_lowerCamelCase ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(_lowerCamelCase ) ) _snake_case = { '''do_resize''': True, '''size''': 20, '''do_center_crop''': True, '''crop_size''': 18, '''do_normalize''': True, '''image_mean''': [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3], '''image_std''': [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1], } _snake_case = os.path.join(self.tmpdirname , _lowerCamelCase ) with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp: json.dump(_lowerCamelCase , 
_lowerCamelCase ) def lowercase ( self : Tuple , **_lowerCamelCase : Any ): return CLIPTokenizer.from_pretrained(self.tmpdirname , **_lowerCamelCase ) def lowercase ( self : str , **_lowerCamelCase : Any ): return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **_lowerCamelCase ) def lowercase ( self : int , **_lowerCamelCase : Optional[int] ): return CLIPImageProcessor.from_pretrained(self.tmpdirname , **_lowerCamelCase ) def lowercase ( self : Union[str, Any] ): shutil.rmtree(self.tmpdirname ) def lowercase ( self : Any ): _snake_case = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] _snake_case = [Image.fromarray(np.moveaxis(_lowerCamelCase , 0 , -1 ) ) for x in image_inputs] return image_inputs def lowercase ( self : Optional[Any] ): _snake_case = self.get_tokenizer() _snake_case = self.get_rust_tokenizer() _snake_case = self.get_image_processor() _snake_case = CLIPProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase ) processor_slow.save_pretrained(self.tmpdirname ) _snake_case = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=_lowerCamelCase ) _snake_case = CLIPProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase ) processor_fast.save_pretrained(self.tmpdirname ) _snake_case = CLIPProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , _lowerCamelCase ) self.assertIsInstance(processor_fast.tokenizer , _lowerCamelCase ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , _lowerCamelCase ) 
self.assertIsInstance(processor_fast.image_processor , _lowerCamelCase ) def lowercase ( self : List[Any] ): _snake_case = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) _snake_case = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' ) _snake_case = self.get_image_processor(do_normalize=_lowerCamelCase , padding_value=1.0 ) _snake_case = CLIPProcessor.from_pretrained( self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_lowerCamelCase , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , _lowerCamelCase ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , _lowerCamelCase ) def lowercase ( self : int ): _snake_case = self.get_image_processor() _snake_case = self.get_tokenizer() _snake_case = CLIPProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase ) _snake_case = self.prepare_image_inputs() _snake_case = image_processor(_lowerCamelCase , return_tensors='''np''' ) _snake_case = processor(images=_lowerCamelCase , return_tensors='''np''' ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 ) def lowercase ( self : Any ): _snake_case = self.get_image_processor() _snake_case = self.get_tokenizer() _snake_case = CLIPProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase ) _snake_case = '''lower newer''' _snake_case = processor(text=_lowerCamelCase ) _snake_case = tokenizer(_lowerCamelCase ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def lowercase ( self : Any ): _snake_case = self.get_image_processor() _snake_case = self.get_tokenizer() _snake_case = 
CLIPProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase ) _snake_case = '''lower newer''' _snake_case = self.prepare_image_inputs() _snake_case = processor(text=_lowerCamelCase , images=_lowerCamelCase ) self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''pixel_values'''] ) # test if it raises when no input is passed with pytest.raises(_lowerCamelCase ): processor() def lowercase ( self : List[str] ): _snake_case = self.get_image_processor() _snake_case = self.get_tokenizer() _snake_case = CLIPProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase ) _snake_case = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] _snake_case = processor.batch_decode(_lowerCamelCase ) _snake_case = tokenizer.batch_decode(_lowerCamelCase ) self.assertListEqual(_lowerCamelCase , _lowerCamelCase ) def lowercase ( self : List[Any] ): _snake_case = self.get_image_processor() _snake_case = self.get_tokenizer() _snake_case = CLIPProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase ) _snake_case = '''lower newer''' _snake_case = self.prepare_image_inputs() _snake_case = processor(text=_lowerCamelCase , images=_lowerCamelCase ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
288
1
"""simple docstring""" # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available UpperCAmelCase__ = { 'configuration_efficientnet': [ 'EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'EfficientNetConfig', 'EfficientNetOnnxConfig', ] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ = ['EfficientNetImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ = [ 'EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST', 'EfficientNetForImageClassification', 'EfficientNetModel', 'EfficientNetPreTrainedModel', ] if TYPE_CHECKING: from .configuration_efficientnet import ( EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientNetConfig, EfficientNetOnnxConfig, ) try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_efficientnet import EfficientNetImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_efficientnet import ( EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST, EfficientNetForImageClassification, 
EfficientNetModel, EfficientNetPreTrainedModel, ) else: import sys UpperCAmelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure)
288
"""simple docstring""" import os import time import numpy as np import onnxruntime as ort UpperCAmelCase__ = '1' UpperCAmelCase__ = '0' UpperCAmelCase__ = '1' UpperCAmelCase__ = ort.SessionOptions() UpperCAmelCase__ = ort.GraphOptimizationLevel.ORT_DISABLE_ALL print('Create inference session...') UpperCAmelCase__ = ['TensorrtExecutionProvider', 'CUDAExecutionProvider'] UpperCAmelCase__ = ort.InferenceSession('model.onnx', sess_options=sess_opt, providers=execution_provider) UpperCAmelCase__ = ort.RunOptions() UpperCAmelCase__ = 128 UpperCAmelCase__ = 1 UpperCAmelCase__ = np.ones((batch, sequence), dtype=np.intaa) UpperCAmelCase__ = np.ones((batch, sequence), dtype=np.intaa) UpperCAmelCase__ = np.ones((batch, sequence), dtype=np.intaa) print('Warm up phase...') sess.run( None, { sess.get_inputs()[0].name: input_ids, sess.get_inputs()[1].name: attention_mask, sess.get_inputs()[2].name: token_type_ids, }, run_options=run_opt, ) print('Start inference...') UpperCAmelCase__ = time.time() UpperCAmelCase__ = 2000 UpperCAmelCase__ = {} for iter in range(max_iters): UpperCAmelCase__ = sess.run( None, { sess.get_inputs()[0].name: input_ids, sess.get_inputs()[1].name: attention_mask, sess.get_inputs()[2].name: token_type_ids, }, run_options=run_opt, ) print('Average Inference Time = {:.3f} ms'.format((time.time() - start_time) * 1000 / max_iters))
288
1
# NOTE(review): Flattened copy of the HfArgumentParser test-suite: helper
# dataclasses (basic / default / bool / enum / list / optional / required
# field examples) plus a unittest.TestCase exercising parse_args,
# parse_args_into_dataclasses, parse_dict, parse_json_file and
# parse_yaml_file against hand-built argparse.ArgumentParser expectations.
# The original line breaks and identifiers were lost in flattening (locals
# are all `_snake_case`, dataclass fields `__a`, arguments `_lowerCamelCase`,
# base classes `A_`), so the argparse types and defaults that were passed as
# arguments are no longer recoverable from this text alone.  The code below
# is therefore preserved byte-for-byte rather than restored -- it does not
# parse as-is; restore from the upstream test file before running.
"""simple docstring""" import argparse import json import os import sys import tempfile import unittest from argparse import Namespace from dataclasses import dataclass, field from enum import Enum from pathlib import Path from typing import List, Literal, Optional import yaml from transformers import HfArgumentParser, TrainingArguments from transformers.hf_argparser import make_choice_type_function, string_to_bool # Since Python 3.10, we can use the builtin `|` operator for Union types # See PEP 604: https://peps.python.org/pep-0604 UpperCAmelCase__ = sys.version_info >= (3, 10) def _UpperCAmelCase ( __lowerCamelCase : Any=None , __lowerCamelCase : Optional[int]=None ) -> List[str]: return field(default_factory=lambda: default , metadata=__lowerCamelCase ) @dataclass class lowerCAmelCase__ : __a = 42 __a = 42 __a = 42 __a = 42 @dataclass class lowerCAmelCase__ : __a = 42 __a = field(default="""toto""" , metadata={"""help""": """help message"""} ) @dataclass class lowerCAmelCase__ : __a = False __a = True __a = None class lowerCAmelCase__ ( A_ ): __a = """titi""" __a = """toto""" class lowerCAmelCase__ ( A_ ): __a = """titi""" __a = """toto""" __a = 42 @dataclass class lowerCAmelCase__ : __a = "toto" def lowercase ( self : Union[str, Any] ): _snake_case = BasicEnum(self.foo ) @dataclass class lowerCAmelCase__ : __a = "toto" def lowercase ( self : Optional[Any] ): _snake_case = MixedTypeEnum(self.foo ) @dataclass class lowerCAmelCase__ : __a = None __a = field(default=A_ , metadata={"""help""": """help message"""} ) __a = None __a = list_field(default=[] ) __a = list_field(default=[] ) @dataclass class lowerCAmelCase__ : __a = list_field(default=[] ) __a = list_field(default=[1, 2, 3] ) __a = list_field(default=["""Hallo""", """Bonjour""", """Hello"""] ) __a = list_field(default=[0.1, 0.2, 0.3] ) @dataclass class lowerCAmelCase__ : __a = field() __a = field() __a = field() def lowercase ( self : Dict ): _snake_case = BasicEnum(self.required_enum ) @dataclass class 
lowerCAmelCase__ : __a = 42 __a = field() __a = None __a = field(default="""toto""" , metadata={"""help""": """help message"""} ) __a = list_field(default=["""Hallo""", """Bonjour""", """Hello"""] ) if is_python_no_less_than_3_10: @dataclass class lowerCAmelCase__ : __a = False __a = True __a = None @dataclass class lowerCAmelCase__ : __a = None __a = field(default=A_ , metadata={"""help""": """help message"""} ) __a = None __a = list_field(default=[] ) __a = list_field(default=[] ) class lowerCAmelCase__ ( unittest.TestCase ): def lowercase ( self : Any , _lowerCamelCase : argparse.ArgumentParser , _lowerCamelCase : argparse.ArgumentParser ): self.assertEqual(len(a._actions ) , len(b._actions ) ) for x, y in zip(a._actions , b._actions ): _snake_case = {k: v for k, v in vars(_lowerCamelCase ).items() if k != '''container'''} _snake_case = {k: v for k, v in vars(_lowerCamelCase ).items() if k != '''container'''} # Choices with mixed type have custom function as "type" # So we need to compare results directly for equality if xx.get('''choices''' , _lowerCamelCase ) and yy.get('''choices''' , _lowerCamelCase ): for expected_choice in yy["choices"] + xx["choices"]: self.assertEqual(xx['''type'''](_lowerCamelCase ) , yy['''type'''](_lowerCamelCase ) ) del xx["type"], yy["type"] self.assertEqual(_lowerCamelCase , _lowerCamelCase ) def lowercase ( self : Optional[int] ): _snake_case = HfArgumentParser(_lowerCamelCase ) _snake_case = argparse.ArgumentParser() expected.add_argument('''--foo''' , type=_lowerCamelCase , required=_lowerCamelCase ) expected.add_argument('''--bar''' , type=_lowerCamelCase , required=_lowerCamelCase ) expected.add_argument('''--baz''' , type=_lowerCamelCase , required=_lowerCamelCase ) expected.add_argument('''--flag''' , type=_lowerCamelCase , default=_lowerCamelCase , const=_lowerCamelCase , nargs='''?''' ) self.argparsersEqual(_lowerCamelCase , _lowerCamelCase ) _snake_case = ['''--foo''', '''1''', '''--baz''', '''quux''', '''--bar''', 
'''0.5'''] ((_snake_case) , ) = parser.parse_args_into_dataclasses(_lowerCamelCase , look_for_args_file=_lowerCamelCase ) self.assertFalse(example.flag ) def lowercase ( self : Union[str, Any] ): _snake_case = HfArgumentParser(_lowerCamelCase ) _snake_case = argparse.ArgumentParser() expected.add_argument('''--foo''' , default=42 , type=_lowerCamelCase ) expected.add_argument('''--baz''' , default='''toto''' , type=_lowerCamelCase , help='''help message''' ) self.argparsersEqual(_lowerCamelCase , _lowerCamelCase ) def lowercase ( self : List[str] ): _snake_case = argparse.ArgumentParser() expected.add_argument('''--foo''' , type=_lowerCamelCase , default=_lowerCamelCase , const=_lowerCamelCase , nargs='''?''' ) expected.add_argument('''--baz''' , type=_lowerCamelCase , default=_lowerCamelCase , const=_lowerCamelCase , nargs='''?''' ) # A boolean no_* argument always has to come after its "default: True" regular counter-part # and its default must be set to False expected.add_argument('''--no_baz''' , action='''store_false''' , default=_lowerCamelCase , dest='''baz''' ) expected.add_argument('''--opt''' , type=_lowerCamelCase , default=_lowerCamelCase ) _snake_case = [WithDefaultBoolExample] if is_python_no_less_than_3_10: dataclass_types.append(_lowerCamelCase ) for dataclass_type in dataclass_types: _snake_case = HfArgumentParser(_lowerCamelCase ) self.argparsersEqual(_lowerCamelCase , _lowerCamelCase ) _snake_case = parser.parse_args([] ) self.assertEqual(_lowerCamelCase , Namespace(foo=_lowerCamelCase , baz=_lowerCamelCase , opt=_lowerCamelCase ) ) _snake_case = parser.parse_args(['''--foo''', '''--no_baz'''] ) self.assertEqual(_lowerCamelCase , Namespace(foo=_lowerCamelCase , baz=_lowerCamelCase , opt=_lowerCamelCase ) ) _snake_case = parser.parse_args(['''--foo''', '''--baz'''] ) self.assertEqual(_lowerCamelCase , Namespace(foo=_lowerCamelCase , baz=_lowerCamelCase , opt=_lowerCamelCase ) ) _snake_case = parser.parse_args(['''--foo''', '''True''', '''--baz''', 
'''True''', '''--opt''', '''True'''] ) self.assertEqual(_lowerCamelCase , Namespace(foo=_lowerCamelCase , baz=_lowerCamelCase , opt=_lowerCamelCase ) ) _snake_case = parser.parse_args(['''--foo''', '''False''', '''--baz''', '''False''', '''--opt''', '''False'''] ) self.assertEqual(_lowerCamelCase , Namespace(foo=_lowerCamelCase , baz=_lowerCamelCase , opt=_lowerCamelCase ) ) def lowercase ( self : Any ): _snake_case = HfArgumentParser(_lowerCamelCase ) _snake_case = argparse.ArgumentParser() expected.add_argument( '''--foo''' , default='''toto''' , choices=['''titi''', '''toto''', 42] , type=make_choice_type_function(['''titi''', '''toto''', 42] ) , ) self.argparsersEqual(_lowerCamelCase , _lowerCamelCase ) _snake_case = parser.parse_args([] ) self.assertEqual(args.foo , '''toto''' ) _snake_case = parser.parse_args_into_dataclasses([] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.toto ) _snake_case = parser.parse_args(['''--foo''', '''titi'''] ) self.assertEqual(args.foo , '''titi''' ) _snake_case = parser.parse_args_into_dataclasses(['''--foo''', '''titi'''] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.titi ) _snake_case = parser.parse_args(['''--foo''', '''42'''] ) self.assertEqual(args.foo , 42 ) _snake_case = parser.parse_args_into_dataclasses(['''--foo''', '''42'''] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo ) def lowercase ( self : Optional[int] ): @dataclass class lowerCAmelCase__ : __a = "toto" _snake_case = HfArgumentParser(_lowerCamelCase ) _snake_case = argparse.ArgumentParser() expected.add_argument( '''--foo''' , default='''toto''' , choices=('''titi''', '''toto''', 42) , type=make_choice_type_function(['''titi''', '''toto''', 42] ) , ) self.argparsersEqual(_lowerCamelCase , _lowerCamelCase ) _snake_case = parser.parse_args([] ) self.assertEqual(args.foo , '''toto''' ) _snake_case = parser.parse_args(['''--foo''', '''titi'''] ) self.assertEqual(args.foo , '''titi''' ) _snake_case = parser.parse_args(['''--foo''', '''42'''] ) 
self.assertEqual(args.foo , 42 ) def lowercase ( self : Optional[Any] ): _snake_case = HfArgumentParser(_lowerCamelCase ) _snake_case = argparse.ArgumentParser() expected.add_argument('''--foo_int''' , nargs='''+''' , default=[] , type=_lowerCamelCase ) expected.add_argument('''--bar_int''' , nargs='''+''' , default=[1, 2, 3] , type=_lowerCamelCase ) expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=_lowerCamelCase ) expected.add_argument('''--foo_float''' , nargs='''+''' , default=[0.1, 0.2, 0.3] , type=_lowerCamelCase ) self.argparsersEqual(_lowerCamelCase , _lowerCamelCase ) _snake_case = parser.parse_args([] ) self.assertEqual( _lowerCamelCase , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['''Hallo''', '''Bonjour''', '''Hello'''] , foo_float=[0.1, 0.2, 0.3] ) , ) _snake_case = parser.parse_args('''--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'''.split() ) self.assertEqual(_lowerCamelCase , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['''a''', '''b''', '''c'''] , foo_float=[0.1, 0.7] ) ) def lowercase ( self : int ): _snake_case = argparse.ArgumentParser() expected.add_argument('''--foo''' , default=_lowerCamelCase , type=_lowerCamelCase ) expected.add_argument('''--bar''' , default=_lowerCamelCase , type=_lowerCamelCase , help='''help message''' ) expected.add_argument('''--baz''' , default=_lowerCamelCase , type=_lowerCamelCase ) expected.add_argument('''--ces''' , nargs='''+''' , default=[] , type=_lowerCamelCase ) expected.add_argument('''--des''' , nargs='''+''' , default=[] , type=_lowerCamelCase ) _snake_case = [OptionalExample] if is_python_no_less_than_3_10: dataclass_types.append(_lowerCamelCase ) for dataclass_type in dataclass_types: _snake_case = HfArgumentParser(_lowerCamelCase ) self.argparsersEqual(_lowerCamelCase , _lowerCamelCase ) _snake_case = parser.parse_args([] ) self.assertEqual(_lowerCamelCase , Namespace(foo=_lowerCamelCase , bar=_lowerCamelCase , 
baz=_lowerCamelCase , ces=[] , des=[] ) ) _snake_case = parser.parse_args('''--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'''.split() ) self.assertEqual(_lowerCamelCase , Namespace(foo=12 , bar=3.1_4 , baz='''42''' , ces=['''a''', '''b''', '''c'''] , des=[1, 2, 3] ) ) def lowercase ( self : List[Any] ): _snake_case = HfArgumentParser(_lowerCamelCase ) _snake_case = argparse.ArgumentParser() expected.add_argument('''--required_list''' , nargs='''+''' , type=_lowerCamelCase , required=_lowerCamelCase ) expected.add_argument('''--required_str''' , type=_lowerCamelCase , required=_lowerCamelCase ) expected.add_argument( '''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=_lowerCamelCase , ) self.argparsersEqual(_lowerCamelCase , _lowerCamelCase ) def lowercase ( self : Dict ): _snake_case = HfArgumentParser(_lowerCamelCase ) _snake_case = argparse.ArgumentParser() expected.add_argument('''--foo''' , type=_lowerCamelCase , required=_lowerCamelCase ) expected.add_argument( '''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=_lowerCamelCase , ) expected.add_argument('''--opt''' , type=_lowerCamelCase , default=_lowerCamelCase ) expected.add_argument('''--baz''' , default='''toto''' , type=_lowerCamelCase , help='''help message''' ) expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=_lowerCamelCase ) self.argparsersEqual(_lowerCamelCase , _lowerCamelCase ) def lowercase ( self : int ): _snake_case = HfArgumentParser(_lowerCamelCase ) _snake_case = { '''foo''': 12, '''bar''': 3.1_4, '''baz''': '''42''', '''flag''': True, } _snake_case = parser.parse_dict(_lowerCamelCase )[0] _snake_case = BasicExample(**_lowerCamelCase ) self.assertEqual(_lowerCamelCase , _lowerCamelCase ) def lowercase ( self : Dict ): _snake_case = HfArgumentParser(_lowerCamelCase ) 
_snake_case = { '''foo''': 12, '''bar''': 3.1_4, '''baz''': '''42''', '''flag''': True, '''extra''': 42, } self.assertRaises(_lowerCamelCase , parser.parse_dict , _lowerCamelCase , allow_extra_keys=_lowerCamelCase ) def lowercase ( self : Optional[Any] ): _snake_case = HfArgumentParser(_lowerCamelCase ) _snake_case = { '''foo''': 12, '''bar''': 3.1_4, '''baz''': '''42''', '''flag''': True, } with tempfile.TemporaryDirectory() as tmp_dir: _snake_case = os.path.join(_lowerCamelCase , '''temp_json''' ) os.mkdir(_lowerCamelCase ) with open(temp_local_path + '''.json''' , '''w+''' ) as f: json.dump(_lowerCamelCase , _lowerCamelCase ) _snake_case = parser.parse_yaml_file(Path(temp_local_path + '''.json''' ) )[0] _snake_case = BasicExample(**_lowerCamelCase ) self.assertEqual(_lowerCamelCase , _lowerCamelCase ) def lowercase ( self : Any ): _snake_case = HfArgumentParser(_lowerCamelCase ) _snake_case = { '''foo''': 12, '''bar''': 3.1_4, '''baz''': '''42''', '''flag''': True, } with tempfile.TemporaryDirectory() as tmp_dir: _snake_case = os.path.join(_lowerCamelCase , '''temp_yaml''' ) os.mkdir(_lowerCamelCase ) with open(temp_local_path + '''.yaml''' , '''w+''' ) as f: yaml.dump(_lowerCamelCase , _lowerCamelCase ) _snake_case = parser.parse_yaml_file(Path(temp_local_path + '''.yaml''' ) )[0] _snake_case = BasicExample(**_lowerCamelCase ) self.assertEqual(_lowerCamelCase , _lowerCamelCase ) def lowercase ( self : Union[str, Any] ): _snake_case = HfArgumentParser(_lowerCamelCase ) self.assertIsNotNone(_lowerCamelCase )
288
"""simple docstring""" import logging from transformers.configuration_utils import PretrainedConfig UpperCAmelCase__ = logging.getLogger(__name__) class lowerCAmelCase__ ( A_ ): __a = """masked_bert""" def __init__( self : Union[str, Any] , _lowerCamelCase : Any=30522 , _lowerCamelCase : Union[str, Any]=768 , _lowerCamelCase : Tuple=12 , _lowerCamelCase : Any=12 , _lowerCamelCase : str=3072 , _lowerCamelCase : str="gelu" , _lowerCamelCase : int=0.1 , _lowerCamelCase : Optional[int]=0.1 , _lowerCamelCase : Dict=512 , _lowerCamelCase : List[Any]=2 , _lowerCamelCase : int=0.0_2 , _lowerCamelCase : Union[str, Any]=1e-12 , _lowerCamelCase : Union[str, Any]=0 , _lowerCamelCase : List[str]="topK" , _lowerCamelCase : Optional[Any]="constant" , _lowerCamelCase : Optional[Any]=0.0 , **_lowerCamelCase : str , ): super().__init__(pad_token_id=_lowerCamelCase , **_lowerCamelCase ) _snake_case = vocab_size _snake_case = hidden_size _snake_case = num_hidden_layers _snake_case = num_attention_heads _snake_case = hidden_act _snake_case = intermediate_size _snake_case = hidden_dropout_prob _snake_case = attention_probs_dropout_prob _snake_case = max_position_embeddings _snake_case = type_vocab_size _snake_case = initializer_range _snake_case = layer_norm_eps _snake_case = pruning_method _snake_case = mask_init _snake_case = mask_scale
288
1
"""Unconditional latent-diffusion image-generation pipeline (VQ-VAE + UNet + DDIM).

The flattened original did not run: ``__call__`` had six parameters all named
``_lowerCamelCase`` (a SyntaxError) and its body referenced the real names
(``latents``, ``eta``, ``batch_size``, ``output_type``, ``return_dict``,
``accepts_eta``), which grounds this reconstruction.
"""
import inspect
from typing import List, Optional, Tuple, Union

import torch

from ...models import UNetaDModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class lowerCAmelCase__(DiffusionPipeline):
    """Pipeline for unconditional image generation with latent diffusion."""

    def __init__(self, vqvae: VQModel, unet: UNetaDModel, scheduler: DDIMScheduler):
        super().__init__()
        # Register sub-modules so they are saved/loaded/moved with the pipeline.
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ):
        """Generate ``batch_size`` images.

        Returns an ``ImagePipelineOutput`` (or a one-element tuple when
        ``return_dict`` is False).  ``eta`` is only forwarded to schedulers
        whose ``step`` signature accepts it (e.g. DDIM).
        """
        # Start from Gaussian noise in the latent space of the VQ-VAE.
        latents = randn_tensor(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        latents = latents.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(self.scheduler.timesteps):
            latent_model_input = self.scheduler.scale_model_input(latents, t)
            # predict the noise residual
            noise_prediction = self.unet(latent_model_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_prediction, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VAE
        image = self.vqvae.decode(latents).sample

        # Map from [-1, 1] to [0, 1] and move channels last for numpy/PIL output.
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
288
# NOTE(review): Flattened copy of the `datasets` Spark builder
# (`Dataset.from_spark`): a `SparkConfig` BuilderConfig, an examples-iterable
# over dataframe partitions, and a DatasetBuilder that repartitions the
# dataframe, writes Arrow/Parquet shards from Spark workers, and renames the
# shards into the -SSSSS-of-NNNNN pattern.  Original line breaks and
# identifiers were lost in flattening (locals are `_snake_case`, methods
# `lowercase`, parameters `_lowerCamelCase`), so many call arguments are no
# longer recoverable from this text; it is preserved byte-for-byte below
# rather than restored.
# NOTE(review): `shutil.move(...)` is called at the end of the shard-writing
# helper but `shutil` is never imported anywhere in this module -- presumably
# the import was lost in flattening; add `import shutil` when restoring.
"""simple docstring""" import os import posixpath import uuid from dataclasses import dataclass from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union import numpy as np import pyarrow as pa import datasets from datasets.arrow_writer import ArrowWriter, ParquetWriter from datasets.config import MAX_SHARD_SIZE from datasets.filesystems import ( is_remote_filesystem, rename, ) from datasets.iterable_dataset import _BaseExamplesIterable from datasets.utils.py_utils import convert_file_size_to_int UpperCAmelCase__ = datasets.utils.logging.get_logger(__name__) if TYPE_CHECKING: import pyspark @dataclass class lowerCAmelCase__ ( datasets.BuilderConfig ): __a = None def _UpperCAmelCase ( __lowerCamelCase : "pyspark.sql.DataFrame" , __lowerCamelCase : List[int] , ) -> Optional[int]: import pyspark def generate_fn(): _snake_case = df.select('''*''' , pyspark.sql.functions.spark_partition_id().alias('''part_id''' ) ) for partition_id in partition_order: _snake_case = df_with_partition_id.select('''*''' ).where(f'''part_id = {partition_id}''' ).drop('''part_id''' ) _snake_case = partition_df.collect() _snake_case = 0 for row in rows: yield f'''{partition_id}_{row_id}''', row.asDict() row_id += 1 return generate_fn class lowerCAmelCase__ ( _BaseExamplesIterable ): def __init__( self : Optional[int] , _lowerCamelCase : "pyspark.sql.DataFrame" , _lowerCamelCase : List[Any]=None , ): _snake_case = df _snake_case = partition_order or range(self.df.rdd.getNumPartitions() ) _snake_case = _generate_iterable_examples(self.df , self.partition_order ) def __iter__( self : Optional[int] ): yield from self.generate_examples_fn() def lowercase ( self : Any , _lowerCamelCase : np.random.Generator ): _snake_case = list(range(self.df.rdd.getNumPartitions() ) ) generator.shuffle(_lowerCamelCase ) return SparkExamplesIterable(self.df , partition_order=_lowerCamelCase ) def lowercase ( self : List[Any] , _lowerCamelCase : int , _lowerCamelCase : int ): _snake_case = 
self.split_shard_indices_by_worker(_lowerCamelCase , _lowerCamelCase ) return SparkExamplesIterable(self.df , partition_order=_lowerCamelCase ) @property def lowercase ( self : List[str] ): return len(self.partition_order ) class lowerCAmelCase__ ( datasets.DatasetBuilder ): __a = SparkConfig def __init__( self : str , _lowerCamelCase : "pyspark.sql.DataFrame" , _lowerCamelCase : str = None , _lowerCamelCase : str = None , **_lowerCamelCase : List[str] , ): import pyspark _snake_case = pyspark.sql.SparkSession.builder.getOrCreate() _snake_case = df _snake_case = working_dir super().__init__( cache_dir=_lowerCamelCase , config_name=str(self.df.semanticHash() ) , **_lowerCamelCase , ) def lowercase ( self : str ): # Returns the path of the created file. def create_cache_and_write_probe(_lowerCamelCase : List[str] ): # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories # already exist. os.makedirs(self._cache_dir , exist_ok=_lowerCamelCase ) _snake_case = os.path.join(self._cache_dir , '''fs_test''' + uuid.uuida().hex ) # Opening the file in append mode will create a new file unless it already exists, in which case it will not # change the file contents. open(_lowerCamelCase , '''a''' ) return [probe_file] if self._spark.conf.get('''spark.master''' , '''''' ).startswith('''local''' ): return # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS # accessible to the driver. # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error. 
if self._cache_dir: _snake_case = ( self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(_lowerCamelCase ).collect() ) if os.path.isfile(probe[0] ): return raise ValueError( '''When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir''' ) def lowercase ( self : Dict ): return datasets.DatasetInfo(features=self.config.features ) def lowercase ( self : Union[str, Any] , _lowerCamelCase : datasets.download.download_manager.DownloadManager ): return [datasets.SplitGenerator(name=datasets.Split.TRAIN )] def lowercase ( self : Dict , _lowerCamelCase : List[Any] ): import pyspark def get_arrow_batch_size(_lowerCamelCase : List[Any] ): for batch in it: yield pa.RecordBatch.from_pydict({'''batch_bytes''': [batch.nbytes]} ) _snake_case = self.df.count() _snake_case = df_num_rows if df_num_rows <= 100 else 100 # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample. _snake_case = ( self.df.limit(_lowerCamelCase ) .repartition(1 ) .mapInArrow(_lowerCamelCase , '''batch_bytes: long''' ) .agg(pyspark.sql.functions.sum('''batch_bytes''' ).alias('''sample_bytes''' ) ) .collect()[0] .sample_bytes / sample_num_rows ) _snake_case = approx_bytes_per_row * df_num_rows if approx_total_size > max_shard_size: # Make sure there is at least one row per partition. _snake_case = min(_lowerCamelCase , int(approx_total_size / max_shard_size ) ) _snake_case = self.df.repartition(_lowerCamelCase ) def lowercase ( self : Dict , _lowerCamelCase : str , _lowerCamelCase : str , _lowerCamelCase : int , ): import pyspark _snake_case = ParquetWriter if file_format == '''parquet''' else ArrowWriter _snake_case = os.path.join(self._working_dir , os.path.basename(_lowerCamelCase ) ) if self._working_dir else fpath _snake_case = file_format == '''parquet''' # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to # pickling the SparkContext. 
_snake_case = self.config.features _snake_case = self._writer_batch_size _snake_case = self._fs.storage_options def write_arrow(_lowerCamelCase : Tuple ): # Within the same SparkContext, no two task attempts will share the same attempt ID. _snake_case = pyspark.TaskContext().taskAttemptId() _snake_case = next(_lowerCamelCase , _lowerCamelCase ) if first_batch is None: # Some partitions might not receive any data. return pa.RecordBatch.from_arrays( [[task_id], [0], [0]] , names=['''task_id''', '''num_examples''', '''num_bytes'''] , ) _snake_case = 0 _snake_case = writer_class( features=_lowerCamelCase , path=working_fpath.replace('''SSSSS''' , f'''{shard_id:05d}''' ).replace('''TTTTT''' , f'''{task_id:05d}''' ) , writer_batch_size=_lowerCamelCase , storage_options=_lowerCamelCase , embed_local_files=_lowerCamelCase , ) _snake_case = pa.Table.from_batches([first_batch] ) writer.write_table(_lowerCamelCase ) for batch in it: if max_shard_size is not None and writer._num_bytes >= max_shard_size: _snake_case , _snake_case = writer.finalize() writer.close() yield pa.RecordBatch.from_arrays( [[task_id], [num_examples], [num_bytes]] , names=['''task_id''', '''num_examples''', '''num_bytes'''] , ) shard_id += 1 _snake_case = writer_class( features=writer._features , path=working_fpath.replace('''SSSSS''' , f'''{shard_id:05d}''' ).replace('''TTTTT''' , f'''{task_id:05d}''' ) , writer_batch_size=_lowerCamelCase , storage_options=_lowerCamelCase , embed_local_files=_lowerCamelCase , ) _snake_case = pa.Table.from_batches([batch] ) writer.write_table(_lowerCamelCase ) if writer._num_bytes > 0: _snake_case , _snake_case = writer.finalize() writer.close() yield pa.RecordBatch.from_arrays( [[task_id], [num_examples], [num_bytes]] , names=['''task_id''', '''num_examples''', '''num_bytes'''] , ) if working_fpath != fpath: for file in os.listdir(os.path.dirname(_lowerCamelCase ) ): _snake_case = os.path.join(os.path.dirname(_lowerCamelCase ) , os.path.basename(_lowerCamelCase ) ) 
shutil.move(_lowerCamelCase , _lowerCamelCase ) _snake_case = ( self.df.mapInArrow(_lowerCamelCase , '''task_id: long, num_examples: long, num_bytes: long''' ) .groupBy('''task_id''' ) .agg( pyspark.sql.functions.sum('''num_examples''' ).alias('''total_num_examples''' ) , pyspark.sql.functions.sum('''num_bytes''' ).alias('''total_num_bytes''' ) , pyspark.sql.functions.count('''num_bytes''' ).alias('''num_shards''' ) , pyspark.sql.functions.collect_list('''num_examples''' ).alias('''shard_lengths''' ) , ) .collect() ) for row in stats: yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths) def lowercase ( self : int , _lowerCamelCase : "datasets.SplitGenerator" , _lowerCamelCase : str = "arrow" , _lowerCamelCase : Optional[Union[str, int]] = None , _lowerCamelCase : Optional[int] = None , **_lowerCamelCase : List[Any] , ): self._validate_cache_dir() _snake_case = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE ) self._repartition_df_if_needed(_lowerCamelCase ) _snake_case = not is_remote_filesystem(self._fs ) _snake_case = os.path.join if is_local else posixpath.join _snake_case = '''-TTTTT-SSSSS-of-NNNNN''' _snake_case = f'''{self.name}-{split_generator.name}{SUFFIX}.{file_format}''' _snake_case = path_join(self._output_dir , _lowerCamelCase ) _snake_case = 0 _snake_case = 0 _snake_case = 0 _snake_case = [] _snake_case = [] for task_id, content in self._prepare_split_single(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): ( ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ) = content if num_bytes > 0: total_num_examples += num_examples total_num_bytes += num_bytes total_shards += num_shards task_id_and_num_shards.append((task_id, num_shards) ) all_shard_lengths.extend(_lowerCamelCase ) _snake_case = total_num_examples _snake_case = total_num_bytes # should rename everything at the end logger.debug(f'''Renaming {total_shards} shards.''' ) if total_shards > 1: _snake_case = 
all_shard_lengths # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a # pickling error due to pickling the SparkContext. _snake_case = self._fs # use the -SSSSS-of-NNNNN pattern def _rename_shard( _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : int , ): rename( _lowerCamelCase , fpath.replace('''SSSSS''' , f'''{shard_id:05d}''' ).replace('''TTTTT''' , f'''{task_id:05d}''' ) , fpath.replace('''TTTTT-SSSSS''' , f'''{global_shard_id:05d}''' ).replace('''NNNNN''' , f'''{total_shards:05d}''' ) , ) _snake_case = [] _snake_case = 0 for i in range(len(_lowerCamelCase ) ): _snake_case , _snake_case = task_id_and_num_shards[i] for shard_id in range(_lowerCamelCase ): args.append([task_id, shard_id, global_shard_id] ) global_shard_id += 1 self._spark.sparkContext.parallelize(_lowerCamelCase , len(_lowerCamelCase ) ).map(lambda _lowerCamelCase : _rename_shard(*_lowerCamelCase ) ).collect() else: # don't use any pattern _snake_case = 0 _snake_case = task_id_and_num_shards[0][0] self._rename( fpath.replace('''SSSSS''' , f'''{shard_id:05d}''' ).replace('''TTTTT''' , f'''{task_id:05d}''' ) , fpath.replace(_lowerCamelCase , '''''' ) , ) def lowercase ( self : List[str] , _lowerCamelCase : "datasets.SplitGenerator" , ): return SparkExamplesIterable(self.df )
288
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import _LazyModule UpperCAmelCase__ = {'tokenization_bertweet': ['BertweetTokenizer']} if TYPE_CHECKING: from .tokenization_bertweet import BertweetTokenizer else: import sys UpperCAmelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
288
"""simple docstring""" from math import sqrt def _UpperCAmelCase ( __lowerCamelCase : int = 1_00_00_00 ) -> int: _snake_case = 0 _snake_case = 0 _snake_case = 42 while num_cuboids <= limit: max_cuboid_size += 1 for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ): if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer(): num_cuboids += ( min(__lowerCamelCase , sum_shortest_sides // 2 ) - max(1 , sum_shortest_sides - max_cuboid_size ) + 1 ) return max_cuboid_size if __name__ == "__main__": print(F"{solution() = }")
288
1
"""simple docstring""" import math import numpy as np import qiskit from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute def _UpperCAmelCase ( __lowerCamelCase : int = 3 ) -> qiskit.result.counts.Counts: if isinstance(__lowerCamelCase , __lowerCamelCase ): raise TypeError('''number of qubits must be a integer.''' ) if number_of_qubits <= 0: raise ValueError('''number of qubits must be > 0.''' ) if math.floor(__lowerCamelCase ) != number_of_qubits: raise ValueError('''number of qubits must be exact integer.''' ) if number_of_qubits > 10: raise ValueError('''number of qubits too large to simulate(>10).''' ) _snake_case = QuantumRegister(__lowerCamelCase , '''qr''' ) _snake_case = ClassicalRegister(__lowerCamelCase , '''cr''' ) _snake_case = QuantumCircuit(__lowerCamelCase , __lowerCamelCase ) _snake_case = number_of_qubits for i in range(__lowerCamelCase ): quantum_circuit.h(number_of_qubits - i - 1 ) counter -= 1 for j in range(__lowerCamelCase ): quantum_circuit.cp(np.pi / 2 ** (counter - j) , __lowerCamelCase , __lowerCamelCase ) for k in range(number_of_qubits // 2 ): quantum_circuit.swap(__lowerCamelCase , number_of_qubits - k - 1 ) # measure all the qubits quantum_circuit.measure(__lowerCamelCase , __lowerCamelCase ) # simulate with 10000 shots _snake_case = Aer.get_backend('''qasm_simulator''' ) _snake_case = execute(__lowerCamelCase , __lowerCamelCase , shots=1_00_00 ) return job.result().get_counts(__lowerCamelCase ) if __name__ == "__main__": print( F"Total count for quantum fourier transform state is: \ {quantum_fourier_transform(3)}" )
288
# NOTE(review): Flattened copy of the DeiT timm -> HuggingFace checkpoint
# conversion script: it builds the rename-key table for the encoder layers and
# embeddings, splits timm's fused qkv projection into separate query/key/value
# weights, loads the timm model and an equivalently-configured
# DeiTForImageClassificationWithTeacher, runs both on a COCO test image,
# asserts the logits match within 1e-3, and saves the model + image processor.
# Original line breaks and identifiers were lost in flattening (locals
# `_snake_case`, arguments `__lowerCamelCase`), so several call arguments are
# no longer recoverable from this text; preserved byte-for-byte rather than
# restored.
"""simple docstring""" import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor from transformers.utils import logging logging.set_verbosity_info() UpperCAmelCase__ = logging.get_logger(__name__) def _UpperCAmelCase ( __lowerCamelCase : Any , __lowerCamelCase : Union[str, Any]=False ) -> Optional[int]: _snake_case = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''deit.encoder.layer.{i}.layernorm_before.weight''') ) rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''deit.encoder.layer.{i}.layernorm_before.bias''') ) rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''deit.encoder.layer.{i}.attention.output.dense.weight''') ) rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''deit.encoder.layer.{i}.attention.output.dense.bias''') ) rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''deit.encoder.layer.{i}.layernorm_after.weight''') ) rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''deit.encoder.layer.{i}.layernorm_after.bias''') ) rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''deit.encoder.layer.{i}.intermediate.dense.weight''') ) rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''deit.encoder.layer.{i}.intermediate.dense.bias''') ) rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''deit.encoder.layer.{i}.output.dense.weight''') ) rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''deit.encoder.layer.{i}.output.dense.bias''') ) # projection layer + position embeddings rename_keys.extend( [ ('''cls_token''', '''deit.embeddings.cls_token'''), ('''dist_token''', '''deit.embeddings.distillation_token'''), ('''patch_embed.proj.weight''', 
'''deit.embeddings.patch_embeddings.projection.weight'''), ('''patch_embed.proj.bias''', '''deit.embeddings.patch_embeddings.projection.bias'''), ('''pos_embed''', '''deit.embeddings.position_embeddings'''), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ('''norm.weight''', '''layernorm.weight'''), ('''norm.bias''', '''layernorm.bias'''), ('''pre_logits.fc.weight''', '''pooler.dense.weight'''), ('''pre_logits.fc.bias''', '''pooler.dense.bias'''), ] ) # if just the base model, we should remove "deit" from all keys that start with "deit" _snake_case = [(pair[0], pair[1][4:]) if pair[1].startswith('''deit''' ) else pair for pair in rename_keys] else: # layernorm + classification heads rename_keys.extend( [ ('''norm.weight''', '''deit.layernorm.weight'''), ('''norm.bias''', '''deit.layernorm.bias'''), ('''head.weight''', '''cls_classifier.weight'''), ('''head.bias''', '''cls_classifier.bias'''), ('''head_dist.weight''', '''distillation_classifier.weight'''), ('''head_dist.bias''', '''distillation_classifier.bias'''), ] ) return rename_keys def _UpperCAmelCase ( __lowerCamelCase : int , __lowerCamelCase : List[str] , __lowerCamelCase : Tuple=False ) -> Tuple: for i in range(config.num_hidden_layers ): if base_model: _snake_case = '''''' else: _snake_case = '''deit.''' # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) _snake_case = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''' ) _snake_case = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict _snake_case = in_proj_weight[ : config.hidden_size, : ] _snake_case = in_proj_bias[: config.hidden_size] _snake_case = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] _snake_case = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] _snake_case = in_proj_weight[ -config.hidden_size :, : ] _snake_case = in_proj_bias[-config.hidden_size :] def _UpperCAmelCase ( __lowerCamelCase : str 
, __lowerCamelCase : Tuple , __lowerCamelCase : Tuple ) -> Tuple: _snake_case = dct.pop(__lowerCamelCase ) _snake_case = val def _UpperCAmelCase ( ) -> Dict: _snake_case = '''http://images.cocodataset.org/val2017/000000039769.jpg''' _snake_case = Image.open(requests.get(__lowerCamelCase , stream=__lowerCamelCase ).raw ) return im @torch.no_grad() def _UpperCAmelCase ( __lowerCamelCase : Union[str, Any] , __lowerCamelCase : str ) -> str: _snake_case = DeiTConfig() # all deit models have fine-tuned heads _snake_case = False # dataset (fine-tuned on ImageNet 2012), patch_size and image_size _snake_case = 10_00 _snake_case = '''huggingface/label-files''' _snake_case = '''imagenet-1k-id2label.json''' _snake_case = json.load(open(hf_hub_download(__lowerCamelCase , __lowerCamelCase , repo_type='''dataset''' ) , '''r''' ) ) _snake_case = {int(__lowerCamelCase ): v for k, v in idalabel.items()} _snake_case = idalabel _snake_case = {v: k for k, v in idalabel.items()} _snake_case = int(deit_name[-6:-4] ) _snake_case = int(deit_name[-3:] ) # size of the architecture if deit_name[9:].startswith('''tiny''' ): _snake_case = 1_92 _snake_case = 7_68 _snake_case = 12 _snake_case = 3 elif deit_name[9:].startswith('''small''' ): _snake_case = 3_84 _snake_case = 15_36 _snake_case = 12 _snake_case = 6 if deit_name[9:].startswith('''base''' ): pass elif deit_name[4:].startswith('''large''' ): _snake_case = 10_24 _snake_case = 40_96 _snake_case = 24 _snake_case = 16 # load original model from timm _snake_case = timm.create_model(__lowerCamelCase , pretrained=__lowerCamelCase ) timm_model.eval() # load state_dict of original model, remove and rename some keys _snake_case = timm_model.state_dict() _snake_case = create_rename_keys(__lowerCamelCase , __lowerCamelCase ) for src, dest in rename_keys: rename_key(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) read_in_q_k_v(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) # load HuggingFace model _snake_case = 
DeiTForImageClassificationWithTeacher(__lowerCamelCase ).eval() model.load_state_dict(__lowerCamelCase ) # Check outputs on an image, prepared by DeiTImageProcessor _snake_case = int( (2_56 / 2_24) * config.image_size ) # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103 _snake_case = DeiTImageProcessor(size=__lowerCamelCase , crop_size=config.image_size ) _snake_case = image_processor(images=prepare_img() , return_tensors='''pt''' ) _snake_case = encoding['''pixel_values'''] _snake_case = model(__lowerCamelCase ) _snake_case = timm_model(__lowerCamelCase ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(__lowerCamelCase , outputs.logits , atol=1E-3 ) Path(__lowerCamelCase ).mkdir(exist_ok=__lowerCamelCase ) print(f'''Saving model {deit_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(__lowerCamelCase ) print(f'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(__lowerCamelCase ) if __name__ == "__main__": UpperCAmelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( '--deit_name', default='vit_deit_base_distilled_patch16_224', type=str, help='Name of the DeiT timm model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) UpperCAmelCase__ = parser.parse_args() convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
288
1